repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
huyingxi/Synonyms | synonyms/utils.py | is_zh | python | def is_zh(ch):
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False | return True if ch is Chinese character.
full-width puncts/latins are not counted in. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L270-L291 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/synonyms.py | _load_stopwords | python | def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip()) | load stop words | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L91-L102 | [
"def any2unicode(text, encoding='utf8', errors='strict'):\n \"\"\"Convert a string (bytestring in `encoding` or unicode), to unicode.\"\"\"\n if isinstance(text, unicode):\n return text\n return unicode(text, encoding, errors=errors)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | _segment_words | python | def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags | segment words with jieba | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L107-L116 | [
"def cut(sentence, HMM=True):\n \"\"\"\n Global `cut` function that supports parallel processing.\n\n Note that this only works using dt, custom POSTokenizer\n instances are not supported.\n \"\"\"\n global dt\n if jieba.pool is None:\n for w in dt.cut(sentence, HMM=HMM):\n yi... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | _load_w2v | python | def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore') | load word2vec model | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L125-L133 | [
"def load_word2vec_format(\n cls,\n fname,\n fvocab=None,\n binary=False,\n encoding='utf8',\n unicode_errors='strict',\n limit=None,\n datatype=REAL):\n \"\"\"\n Load the input-hidden weight matrix from the original C word2vec-tool format.\n Note tha... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
    '''
    Bag-of-words vector for a pre-segmented sentence.

    sentence: tokens joined by spaces after word segmentation.
    ignore  : True  -> skip OOV words;
              False -> give OOV words a deterministic random vector.
    '''
    return _get_wv(sentence, ignore=ignore)
def v(word):
    '''
    Vector of a single word; raises KeyError when the word is OOV.
    '''
    token = any2unicode(word).strip()
    return _vectors.word_vec(token)
def _nearby_levenshtein_distance(s1, s2):
    '''
    Edit-distance score between two token lists, boosted by
    embedding-space neighbours: every token of the longer list is scored
    against its best match among the shorter list's tokens plus each of
    their top-10 nearby words.
    '''
    len1, len2 = len(s1), len(s2)
    if len1 == len2:
        first, second = sorted([s1, s2])
        maxlen = len1
    elif len1 < len2:
        first, second, maxlen = s1, s2, len2
    else:
        first, second, maxlen = s2, s1, len1
    # expand the shorter sentence with each token's top-10 neighbours
    expanded = set()
    for token in first:
        expanded.add(token)
        neighbours, _ = nearby(token)
        expanded.update(neighbours[:10])
    scores = []
    for token in second:
        candidates = [_levenshtein_distance(token, cand) for cand in expanded]
        if candidates:
            scores.append(max(candidates))
    return np.sum(scores) / maxlen if scores else 0
def _similarity_distance(s1, s2, ignore):
    '''
    Combine an embedding cosine score (g) with a neighbour-aware
    Levenshtein score (u) into a final similarity in [0.0, 1.0].

    s1, s2: token lists; ignore: skip OOV words when building vectors.
    '''
    g = 0.0
    try:
        g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
        if is_digit(g_): g = g_
    except Exception as e:
        # BUGFIX: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit. cosine can legitimately fail on
        # degenerate vectors (e.g. all tokens were stopwords, or OOV with
        # ignore=True); fall back to g = 0.0 but leave a trace.
        logging.debug("cosine failed, falling back to g=0.0: %s", e)
    u = _nearby_levenshtein_distance(s1, s2)
    logging.debug("g: %s, u: %s" % (g, u))
    # blend the two scores; the closer u is to 1, the more it is trusted
    if u >= 0.99:
        r = 1.0
    elif u > 0.9:
        r = _similarity_smooth(g, 0.05, u, 0.05)
    elif u > 0.8:
        r = _similarity_smooth(g, 0.1, u, 0.2)
    elif u > 0.4:
        r = _similarity_smooth(g, 0.2, u, 0.15)
    elif u > 0.2:
        r = _similarity_smooth(g, 0.3, u, 0.1)
    else:
        r = _similarity_smooth(g, 0.4, u, 0)
    if r < 0: r = abs(r)
    r = min(r, 1.0)
    return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
    '''
    Return (words, scores): the nearest-neighbour words of *word* and
    their similarity scores. An OOV word yields two empty lists.
    Results are memoized in _cache_nearby.
    '''
    w = any2unicode(word)
    # read from cache
    if w in _cache_nearby: return _cache_nearby[w]

    words, scores = [], []
    try:
        for x in _vectors.neighbours(w):
            words.append(x[0])
            scores.append(x[1])
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. OOV words raise KeyError here;
        # keep the empty-result behavior for any lookup failure.
        pass
    # put into cache (OOV misses too, so they are not re-queried)
    _cache_nearby[w] = (words, scores)
    return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
    '''
    Compare the similarity of two sentences; returns a float in [0.0, 1.0].

    s1, s2   : the sentences.
    seg      : True  -> segment the raw sentences with jieba.cut;
               False -> the sentences are pre-segmented, space-joined.
    ignore   : True  -> ignore OOV words;
               False -> get a deterministic random vector for OOV words.
    stopwords: True  -> keep stopwords; False -> filter them out.
    '''
    if s1 == s2: return 1.0

    if seg:
        s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
        s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
    else:
        s1 = s1.split()
        s2 = s2.split()

    # drop stopwords unless the caller asked to keep them
    # (no `global` needed: _stopwords is only read here)
    if not stopwords:
        s1_words = [x for x in s1 if x not in _stopwords]
        s2_words = [x for x in s2 if x not in _stopwords]
    else:
        s1_words = s1
        s2_words = s2

    assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
    return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
    '''Print the nearby words of *word*, one per line, with their scores.'''
    print("'%s'近义词:" % word)
    o = nearby(word)
    assert len(o) == 2, "should contain 2 list"
    neighbours, scores = o
    if len(neighbours) == 0:
        print(" out of vocabulary")
    # note: renamed loop vars so the local no longer shadows the
    # module-level function v()
    for idx, neighbour in enumerate(neighbours):
        print(" %d. %s:%s" % (idx + 1, neighbour, scores[idx]))
def main():
    '''Demo: show synonyms for a known word and for an OOV word.'''
    display("人脸")
    display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | _get_wv | python | def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors | get word2vec data by sentence
sentence is segmented string. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L137-L172 | [
"def nearby(word):\n '''\n Nearby word\n '''\n w = any2unicode(word)\n # read from cache\n if w in _cache_nearby: return _cache_nearby[w]\n\n words, scores = [], []\n try:\n for x in _vectors.neighbours(w):\n words.append(x[0])\n scores.append(x[1])\n except: ... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | _levenshtein_distance | python | def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s | Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L178-L207 | [
"def any2utf8(text, errors='strict', encoding='utf8'):\n \"\"\"Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8.\"\"\"\n if isinstance(text, unicode):\n return text.encode('utf8')\n # do bytestring -> unicode -> utf8 full circle, to ensure valid utf8\n return unicode(... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | _nearby_levenshtein_distance | python | def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s | 使用空间距离近的词汇优化编辑距离计算 | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L225-L254 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | _similarity_distance | python | def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r) | compute similarity with distance measurement | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L256-L283 | [
"cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))\n",
"_similarity_smooth = lambda x, y, z, u: (x * y) + z - u\n",
"_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子\n",
"def is_digit(obj):\n '''\n Check if an object is Number\n '''\n return isinstance(obj, (numbers.Integral, numbers.Complex, n... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
    '''
    Load stop words from ``file_path`` into the module-level ``_stopwords`` set.

    Each line of the file is one stop word; lines are unicode-normalized
    and stripped before being added.
    '''
    global _stopwords
    # Keep the py2/py3 split (py2 open() has no encoding kwarg), but wrap
    # the handle in a context manager: the original leaked the open file.
    if sys.version_info[0] < 3:
        words = open(file_path, 'r')
    else:
        words = open(file_path, 'r', encoding='utf-8')
    with words:
        for w in words.readlines():
            _stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
    '''
    Segment a sentence with the bundled jieba tokenizer.

    Returns two parallel lists: the tokens and their POS flags.
    '''
    # HMM=True gives better recognition of out-of-vocabulary words.
    pairs = [(tok.word, tok.flag) for tok in _tokenizer.cut(sen, HMM=True)]
    if not pairs:
        return [], []
    words, tags = zip(*pairs)
    return list(words), list(tags)
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
    '''
    Load the word2vec KeyedVectors model from ``model_file``.

    Raises an Exception when the model file does not exist.
    '''
    if os.path.exists(model_file):
        return KeyedVectors.load_word2vec_format(
            model_file, binary=binary, unicode_errors='ignore')
    print("os.path : ", os.path)
    raise Exception("Model file [%s] does not exist." % model_file)
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
    '''
    Build a list of word vectors for a segmented sentence.

    For every non-stopword token, the vector appended is the average of
    the token's own embedding and the embeddings of its nearby (synonym)
    words.  OOV tokens are either skipped (``ignore=True``) or replaced
    by a deterministic pseudo-random vector seeded from the token hash.
    '''
    global _vectors
    vectors = []
    for y in sentence:
        y_ = any2unicode(y).strip()
        if y_ not in _stopwords:
            syns = nearby(y_)[0]
            c = []
            try:
                c.append(_vectors.word_vec(y_))
            except KeyError as error:
                if ignore:
                    continue
                else:
                    logging.warning("not exist in w2v model: %s" % y_)
                    # Deterministic fallback vector in (-10, 10).
                    random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
                    c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
            for n in syns:
                if n is None: continue
                try:
                    v = _vectors.word_vec(any2unicode(n))
                except KeyError as error:
                    # BUGFIX: the original used low=10.0, high=10.0, which
                    # always produced a constant all-10.0 vector; use the same
                    # (-10, 10) range as the token fallback above.
                    random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
                    v = random_state.uniform(low=-10.0, high=10.0, size=(100,))
                c.append(v)
            r = np.average(c, axis=0)
            vectors.append(r)
    return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
    '''
    Similarity score derived from the Levenshtein distance between two
    strings, smoothed through a sigmoid so mid-range values spread out.
    '''
    a = any2utf8(sentence1).decode('utf-8', 'ignore')
    b = any2utf8(sentence2).decode('utf-8', 'ignore')
    maxlen = max(len(a), len(b))
    # Iterate the DP row over the shorter string to save memory.
    if len(a) > len(b):
        a, b = b, a
    row = list(range(len(a) + 1))
    for j, cb in enumerate(b):
        next_row = [j + 1]
        for i, ca in enumerate(a):
            if ca == cb:
                next_row.append(row[i])
            else:
                next_row.append(1 + min(row[i], row[i + 1], next_row[-1]))
        row = next_row
    edit = row[-1]
    ratio = float((maxlen - edit) / maxlen)
    # smoothing
    return (sigmoid(ratio * 6) - 0.5) * 2
def sv(sentence, ignore=False):
    '''
    Bag-of-words vectors for a pre-segmented sentence.

    sentence: tokens of a segmented sentence joined by spaces
    ignore:   when False, OOV words get a deterministic random vector
              instead of being skipped
    '''
    return _get_wv(sentence, ignore=ignore)
def v(word):
    '''
    Return the embedding vector of a single word.

    Raises KeyError when the word is out of vocabulary.
    '''
    return _vectors.word_vec(any2unicode(word).strip())
def _nearby_levenshtein_distance(s1, s2):
    '''
    Edit-distance similarity boosted by embedding neighbours: every token
    of the longer sequence is scored against the best match among the
    shorter sequence's tokens and their top-10 nearby words.
    '''
    maxlen = max(len(s1), len(s2))
    if len(s1) == len(s2):
        first, second = sorted([s1, s2])
    elif len(s1) < len(s2):
        first, second = s1, s2
    else:
        first, second = s2, s1
    # Expand the shorter side with each token's top-10 neighbours.
    expanded = set()
    for tok in first:
        expanded.add(tok)
        neighbours, _ = nearby(tok)
        expanded.update(neighbours[:10])
    best_scores = []
    for tok in second:
        candidates = [_levenshtein_distance(tok, other) for other in expanded]
        if candidates:
            best_scores.append(max(candidates))
    return np.sum(best_scores) / maxlen if best_scores else 0
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
    '''
    Return ``(words, scores)`` of the nearest neighbours of ``word``.

    OOV words yield two empty lists.  Results are memoized in the
    module-level ``_cache_nearby`` dict.
    '''
    w = any2unicode(word)
    if w in _cache_nearby:
        return _cache_nearby[w]
    words, scores = [], []
    try:
        for item in _vectors.neighbours(w):
            words.append(item[0])
            scores.append(item[1])
    except:
        pass  # OOV word: leave both lists empty
    _cache_nearby[w] = (words, scores)
    return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
    '''
    Compare the similarity of two sentences.

    s1 : sentence1
    s2 : sentence2
    seg : True : The original sentences need jieba.cut
          False : The original sentences have been cut.
    ignore: True: ignore OOV words
            False: get vector randomly for OOV words
    stopwords: True: keep stop words
               False: drop stop words before comparison
    '''
    if s1 == s2: return 1.0

    s1_words = []
    s2_words = []
    if seg:
        s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
        s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
    else:
        s1 = s1.split()
        s2 = s2.split()

    # drop stop words unless the caller explicitly keeps them
    if not stopwords:
        global _stopwords
        s1_words = [x for x in s1 if x not in _stopwords]
        s2_words = [x for x in s2 if x not in _stopwords]
    else:
        s1_words = s1
        s2_words = s2

    assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
    return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
    '''Pretty-print the nearest neighbours of ``word`` with their scores.'''
    print("'%s'近义词:" % word)
    o = nearby(word)
    assert len(o) == 2, "should contain 2 list"
    words, scores = o
    if not words:
        print("  out of vocabulary")
    for idx, w in enumerate(words):
        print("  %d. %s:%s" % (idx + 1, w, scores[idx]))
def main():
    '''Demo entry point: show neighbours for an in-vocab and an OOV word.'''
    for w in ("人脸", "NOT_EXIST"):
        display(w)
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | nearby | python | def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores | Nearby word | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L290-L306 | [
"def any2unicode(text, encoding='utf8', errors='strict'):\n \"\"\"Convert a string (bytestring in `encoding` or unicode), to unicode.\"\"\"\n if isinstance(text, unicode):\n return text\n return unicode(text, encoding, errors=errors)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
    '''
    Combine the cosine similarity of averaged word vectors with the
    neighbour-aware edit-distance score into a single value in [0, 1].
    '''
    g = 0.0
    try:
        g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
        if is_digit(g_): g = g_
    except: pass

    u = _nearby_levenshtein_distance(s1, s2)
    logging.debug("g: %s, u: %s" % (g, u))
    if u >= 0.99:
        r = 1.0
    else:
        # (lower bound on u) -> (weight applied to g, offset subtracted)
        for lower, weight, offset in ((0.9, 0.05, 0.05),
                                      (0.8, 0.1, 0.2),
                                      (0.4, 0.2, 0.15),
                                      (0.2, 0.3, 0.1),
                                      (float('-inf'), 0.4, 0)):
            if u > lower:
                r = _similarity_smooth(g, weight, u, offset)
                break
    r = min(abs(r) if r < 0 else r, 1.0)
    return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
huyingxi/Synonyms | synonyms/synonyms.py | compare | python | def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore) | compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
False : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/synonyms.py#L308-L344 | [
"def _similarity_distance(s1, s2, ignore):\n '''\n compute similarity with distance measurement\n '''\n g = 0.0\n try:\n g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))\n if is_digit(g_): g = g_\n except: pass\n\n u = _nearby_levenshtein_dis... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#
# Copyright (c) 2017 <> All Rights Reserved
#
#
# File: /Users/hain/ai/Synonyms/synonyms/__init__.py
# Author: Hai Liang Wang
# Date: 2017-09-27
#
#=========================================================================
"""
Chinese Synonyms for Natural Language Processing and Understanding.
"""
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hu Ying Xi<>, Hai Liang Wang<hailiang.hl.wang@gmail.com>"
__date__ = "2017-09-27"
__version__ = "3.3.10"
import os
import sys
import numpy as np
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
PLT = 2
if sys.version_info[0] < 3:
default_stdout = sys.stdout
default_stderr = sys.stderr
reload(sys)
sys.stdout = default_stdout
sys.stderr = default_stderr
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
PLT = 3
# Get Environment variables
ENVIRON = os.environ.copy()
import json
import gzip
import shutil
from absl import logging
from .word2vec import KeyedVectors
from .utils import any2utf8
from .utils import any2unicode
from .utils import sigmoid
from .utils import cosine
from .utils import is_digit
import jieba
from .jieba import posseg as _tokenizer
'''
globals
'''
_vocab = dict()
_size = 0
_vectors = None
_stopwords = set()
_cache_nearby = dict()
'''
lambda fns
'''
# combine similarity scores
_similarity_smooth = lambda x, y, z, u: (x * y) + z - u
_flat_sum_array = lambda x: np.sum(x, axis=0) # 分子
'''
tokenizer settings
'''
tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt')
if "SYNONYMS_WORDSEG_DICT" in ENVIRON:
if os.path.exists(ENVIRON["SYNONYMS_WORDSEG_DICT"]):
print("info: set wordseg dict with %s" % tokenizer_dict)
tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"]
else: print("warning: can not find dict at [%s]" % tokenizer_dict)
print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict)
_tokenizer.initialize(tokenizer_dict)
# stopwords
_fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt')
def _load_stopwords(file_path):
'''
load stop words
'''
global _stopwords
if sys.version_info[0] < 3:
words = open(file_path, 'r')
else:
words = open(file_path, 'r', encoding='utf-8')
stopwords = words.readlines()
for w in stopwords:
_stopwords.add(any2unicode(w).strip())
print(">> Synonyms on loading stopwords [%s] ..." % _fin_stopwords_path)
_load_stopwords(_fin_stopwords_path)
def _segment_words(sen):
'''
segment words with jieba
'''
words, tags = [], []
m = _tokenizer.cut(sen, HMM=True) # HMM更好的识别新词
for x in m:
words.append(x.word)
tags.append(x.flag)
return words, tags
'''
word embedding
'''
# vectors
_f_model = os.path.join(curdir, 'data', 'words.vector')
if "SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN" in ENVIRON:
_f_model = ENVIRON["SYNONYMS_WORD2VEC_BIN_MODEL_ZH_CN"]
def _load_w2v(model_file=_f_model, binary=True):
'''
load word2vec model
'''
if not os.path.exists(model_file):
print("os.path : ", os.path)
raise Exception("Model file [%s] does not exist." % model_file)
return KeyedVectors.load_word2vec_format(
model_file, binary=binary, unicode_errors='ignore')
print(">> Synonyms on loading vectors [%s] ..." % _f_model)
_vectors = _load_w2v(model_file=_f_model)
def _get_wv(sentence, ignore=False):
'''
get word2vec data by sentence
sentence is segmented string.
'''
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
# print("sentence %s word: %s" %(sentence, y_))
# print("sentence %s word nearby: %s" %(sentence, " ".join(syns)))
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
# c.append(np.zeros((100,), dtype=float))
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
# v = np.zeros((100,), dtype=float)
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors
'''
Distance
'''
# Levenshtein Distance
def _levenshtein_distance(sentence1, sentence2):
'''
Return the Levenshtein distance between two strings.
Based on:
http://rosettacode.org/wiki/Levenshtein_distance#Python
'''
first = any2utf8(sentence1).decode('utf-8', 'ignore')
second = any2utf8(sentence2).decode('utf-8', 'ignore')
sentence1_len, sentence2_len = len(first), len(second)
maxlen = max(sentence1_len, sentence2_len)
if sentence1_len > sentence2_len:
first, second = second, first
distances = range(len(first) + 1)
for index2, char2 in enumerate(second):
new_distances = [index2 + 1]
for index1, char1 in enumerate(first):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
levenshtein = distances[-1]
d = float((maxlen - levenshtein)/maxlen)
# smoothing
s = (sigmoid(d * 6) - 0.5) * 2
# print("smoothing[%s| %s]: %s -> %s" % (sentence1, sentence2, d, s))
return s
def sv(sentence, ignore=False):
'''
获得一个分词后句子的向量,向量以BoW方式组成
sentence: 句子是分词后通过空格联合起来
ignore: 是否忽略OOV,False时,随机生成一个向量
'''
return _get_wv(sentence, ignore = ignore)
def v(word):
'''
获得一个词语的向量,OOV时抛出 KeyError 异常
'''
y_ = any2unicode(word).strip()
return _vectors.word_vec(y_)
def _nearby_levenshtein_distance(s1, s2):
'''
使用空间距离近的词汇优化编辑距离计算
'''
s1_len, s2_len = len(s1), len(s2)
maxlen = s1_len
if s1_len == s2_len:
first, second = sorted([s1, s2])
elif s1_len < s2_len:
first = s1
second = s2
maxlen = s2_len
else:
first = s2
second = s1
ft = set() # all related words with first sentence
for x in first:
ft.add(x)
n, _ = nearby(x)
for o in n[:10]:
ft.add(o)
scores = []
for x in second:
choices = [_levenshtein_distance(x, y) for y in ft]
if len(choices) > 0: scores.append(max(choices))
s = np.sum(scores) / maxlen if len(scores) > 0 else 0
return s
def _similarity_distance(s1, s2, ignore):
'''
compute similarity with distance measurement
'''
g = 0.0
try:
g_ = cosine(_flat_sum_array(_get_wv(s1, ignore)), _flat_sum_array(_get_wv(s2, ignore)))
if is_digit(g_): g = g_
except: pass
u = _nearby_levenshtein_distance(s1, s2)
logging.debug("g: %s, u: %s" % (g, u))
if u >= 0.99:
r = 1.0
elif u > 0.9:
r = _similarity_smooth(g, 0.05, u, 0.05)
elif u > 0.8:
r = _similarity_smooth(g, 0.1, u, 0.2)
elif u > 0.4:
r = _similarity_smooth(g, 0.2, u, 0.15)
elif u > 0.2:
r = _similarity_smooth(g, 0.3, u, 0.1)
else:
r = _similarity_smooth(g, 0.4, u, 0)
if r < 0: r = abs(r)
r = min(r, 1.0)
return float("%.3f" % r)
'''
Public Methods
'''
seg = _segment_words # word segmenter
def nearby(word):
'''
Nearby word
'''
w = any2unicode(word)
# read from cache
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass # ignore key error, OOV
# put into cache
_cache_nearby[w] = (words, scores)
return words, scores
def compare(s1, s2, seg=True, ignore=False, stopwords=False):
'''
compare similarity
s1 : sentence1
s2 : sentence2
seg : True : The original sentences need jieba.cut
Flase : The original sentences have been cut.
ignore: True: ignore OOV words
False: get vector randomly for OOV words
'''
if s1 == s2: return 1.0
s1_words = []
s2_words = []
if seg:
s1 = [x for x in jieba.cut(s1, cut_all=False, HMM=False)]
s2 = [x for x in jieba.cut(s2, cut_all=False, HMM=False)]
else:
s1 = s1.split()
s2 = s2.split()
# check stopwords
if not stopwords:
global _stopwords
for x in s1:
if not x in _stopwords:
s1_words.append(x)
for x in s2:
if not x in _stopwords:
s2_words.append(x)
else:
s1_words = s1
s2_words = s2
assert len(s1) > 0 and len(s2) > 0, "The length of s1 and s2 should > 0."
return _similarity_distance(s1_words, s2_words, ignore)
def display(word):
print("'%s'近义词:" % word)
o = nearby(word)
assert len(o) == 2, "should contain 2 list"
if len(o[0]) == 0:
print(" out of vocabulary")
for k, v in enumerate(o[0]):
print(" %d. %s:%s" % (k + 1, v, o[1][k]))
def main():
display("人脸")
display("NOT_EXIST")
if __name__ == '__main__':
main()
|
pre-commit/pre-commit | pre_commit/languages/helpers.py | _shuffled | python | def _shuffled(seq):
fixed_random = random.Random()
if six.PY2: # pragma: no cover (py2)
fixed_random.seed(FIXED_RANDOM_SEED)
else: # pragma: no cover (py3)
fixed_random.seed(FIXED_RANDOM_SEED, version=1)
seq = list(seq)
random.shuffle(seq, random=fixed_random.random)
return seq | Deterministically shuffle identically under both py2 + py3. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/languages/helpers.py#L73-L83 | null | from __future__ import unicode_literals
import multiprocessing
import os
import random
import shlex
import six
import pre_commit.constants as C
from pre_commit.util import cmd_output
from pre_commit.xargs import xargs
FIXED_RANDOM_SEED = 1542676186
def run_setup_cmd(prefix, cmd):
    """Run an environment setup command from inside the hook prefix directory."""
    # encoding=None leaves the command's output as raw bytes.
    cmd_output(*cmd, cwd=prefix.prefix_dir, encoding=None)
def environment_dir(ENVIRONMENT_DIR, language_version):
    """Versioned environment directory name, or None when the language
    has no environment directory at all."""
    if ENVIRONMENT_DIR is None:
        return None
    return '{}-{}'.format(ENVIRONMENT_DIR, language_version)
def to_cmd(hook):
    """Full command for a hook: its shlex-split entry followed by its args."""
    entry = tuple(shlex.split(hook.entry))
    return entry + tuple(hook.args)
def assert_version_default(binary, version):
    """Fail unless the requested language_version is the default one."""
    if version == C.DEFAULT:
        return
    raise AssertionError(
        'For now, pre-commit requires system-installed {}'.format(binary),
    )
def assert_no_additional_deps(lang, additional_deps):
    """Fail when a language that cannot install extras was given
    additional_dependencies."""
    if not additional_deps:
        return
    raise AssertionError(
        'For now, pre-commit does not support '
        'additional_dependencies for {}'.format(lang),
    )
def basic_get_default_version():
    """Default `language_version` for languages that are not versioned."""
    return C.DEFAULT
def basic_healthy(prefix, language_version):
    """Health check for languages with no installed environment: always healthy."""
    return True
def no_install(prefix, version, additional_dependencies):
    """`install_environment` stub for languages that cannot be installed."""
    raise AssertionError('This type is not installable')
def target_concurrency(hook):
    """Number of parallel xargs partitions to use when running a hook."""
    if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:
        return 1
    # Travis appears to have a bunch of CPUs, but we can't use them all.
    if 'TRAVIS' in os.environ:
        return 2
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 1
def run_xargs(hook, cmd, file_args):
    """Run ``cmd`` over ``file_args`` via xargs with hook-appropriate concurrency."""
    # Shuffle the files so that they more evenly fill out the xargs partitions,
    # but do it deterministically in case a hook cares about ordering.
    file_args = _shuffled(file_args)
    return xargs(cmd, file_args, target_concurrency=target_concurrency(hook))
|
pre-commit/pre-commit | pre_commit/envcontext.py | envcontext | python | def envcontext(patch, _env=None):
env = os.environ if _env is None else _env
before = env.copy()
for k, v in patch:
if v is UNSET:
env.pop(k, None)
elif isinstance(v, tuple):
env[k] = format_env(v, before)
else:
env[k] = v
try:
yield
finally:
env.clear()
env.update(before) | In this context, `os.environ` is modified according to `patch`.
`patch` is an iterable of 2-tuples (key, value):
`key`: string
`value`:
- string: `environ[key] == value` inside the context.
- UNSET: `key not in environ` inside the context.
- template: A template is a tuple of strings and Var which will be
replaced with the previous environment | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/envcontext.py#L24-L50 | [
"def format_env(parts, env):\n return ''.join(\n env.get(part.name, part.default) if isinstance(part, Var) else part\n for part in parts\n )\n"
] | from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import contextlib
import os
UNSET = collections.namedtuple('UNSET', ())()  # sentinel: "remove this variable"

# Deferred reference to an environment variable, with an optional default.
Var = collections.namedtuple('Var', ('name', 'default'))
Var.__new__.__defaults__ = ('',)


def format_env(parts, env):
    """Join *parts*, resolving each Var against *env* (falling back to
    the Var's default when the variable is absent)."""
    resolved = []
    for part in parts:
        if isinstance(part, Var):
            resolved.append(env.get(part.name, part.default))
        else:
            resolved.append(part)
    return ''.join(resolved)
@contextlib.contextmanager
|
pre-commit/pre-commit | pre_commit/languages/python.py | bin_dir | python | def bin_dir(venv):
bin_part = 'Scripts' if os.name == 'nt' else 'bin'
return os.path.join(venv, bin_part) | On windows there's a different directory for the virtualenv | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/languages/python.py#L21-L24 | null | from __future__ import unicode_literals
import contextlib
import os
import sys
import pre_commit.constants as C
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import UNSET
from pre_commit.envcontext import Var
from pre_commit.languages import helpers
from pre_commit.parse_shebang import find_executable
from pre_commit.util import CalledProcessError
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output
ENVIRONMENT_DIR = 'py_env'
def get_env_patch(venv):
    """Environment changes that activate the virtualenv at ``venv``.

    Returns (key, value) pairs for ``envcontext``: unset PYTHONHOME,
    point VIRTUAL_ENV at the env, and prepend its bin dir to PATH.
    """
    return (
        ('PYTHONHOME', UNSET),
        ('VIRTUAL_ENV', venv),
        ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
    )
def _find_by_py_launcher(version):  # pragma: no cover (windows only)
    """Resolve e.g. 'python3.6' to an executable path via the Windows
    ``py`` launcher; returns None when it cannot."""
    if not version.startswith('python'):
        return None
    try:
        out = cmd_output(
            'py', '-{}'.format(version[len('python'):]),
            '-c', 'import sys; print(sys.executable)',
        )
        return out[1].strip()
    except CalledProcessError:
        pass
def _get_default_version():  # pragma: no cover (platform dependent)
    """Best-effort detection of a `pythonX.Y`-style default version string."""
    def _norm(path):
        # Normalize an interpreter path to a bare executable name
        # (e.g. /usr/bin/python2.7 -> python2.7); reject the generic
        # `python` / `pythonw` names and anything not on PATH.
        _, exe = os.path.split(path.lower())
        exe, _, _ = exe.partition('.exe')
        if find_executable(exe) and exe not in {'python', 'pythonw'}:
            return exe

    # First attempt from `sys.executable` (or the realpath)
    # On linux, I see these common sys.executables:
    #
    # system `python`: /usr/bin/python -> python2.7
    # system `python2`: /usr/bin/python2 -> python2.7
    # virtualenv v: v/bin/python (will not return from this loop)
    # virtualenv v -ppython2: v/bin/python -> python2
    # virtualenv v -ppython2.7: v/bin/python -> python2.7
    # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
    for path in {sys.executable, os.path.realpath(sys.executable)}:
        exe = _norm(path)
        if exe:
            return exe

    # Next try the `pythonX.X` executable
    exe = 'python{}.{}'.format(*sys.version_info)
    if find_executable(exe):
        return exe

    if _find_by_py_launcher(exe):
        return exe

    # Give a best-effort try for windows
    if os.path.exists(r'C:\{}\python.exe'.format(exe.replace('.', ''))):
        return exe

    # We tried!
    return C.DEFAULT
def get_default_version():
    """Memoized wrapper around `_get_default_version` (computed once per process)."""
    # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`
    if not hasattr(get_default_version, 'cached_version'):
        get_default_version.cached_version = _get_default_version()
    return get_default_version.cached_version
def _sys_executable_matches(version):
if version == 'python':
return True
elif not version.startswith('python'):
return False
try:
info = tuple(int(p) for p in version[len('python'):].split('.'))
except ValueError:
return False
return sys.version_info[:len(info)] == info
def norm_version(version):
    """Normalize a configured python version to an executable path/name."""
    if os.name == 'nt':  # pragma: no cover (windows)
        # first see if our current executable is appropriate
        if _sys_executable_matches(version):
            return sys.executable

        version_exec = _find_by_py_launcher(version)
        if version_exec:
            return version_exec

        # Try looking up by name
        version_exec = find_executable(version)
        if version_exec and version_exec != version:
            return version_exec

        # If it is in the form pythonx.x search in the default
        # place on windows
        if version.startswith('python'):
            return r'C:\{}\python.exe'.format(version.replace('.', ''))

    # Otherwise assume it is a path
    return os.path.expanduser(version)
def py_interface(_dir, _make_venv):
    """Build the language interface shared by python-based languages.

    Returns ``(in_env, healthy, run_hook, install_environment)`` closed
    over the environment directory name ``_dir`` and the virtualenv
    factory ``_make_venv``.
    """
    @contextlib.contextmanager
    def in_env(prefix, language_version):
        # Activate the hook's virtualenv for the duration of the block.
        envdir = prefix.path(helpers.environment_dir(_dir, language_version))
        with envcontext(get_env_patch(envdir)):
            yield

    def healthy(prefix, language_version):
        # Smoke-test import of common stdlib C modules; a broken env
        # (e.g. after a system python upgrade) fails here.
        with in_env(prefix, language_version):
            retcode, _, _ = cmd_output(
                'python', '-c',
                'import ctypes, datetime, io, os, ssl, weakref',
                retcode=None,
            )
        return retcode == 0

    def run_hook(hook, file_args):
        with in_env(hook.prefix, hook.language_version):
            return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)

    def install_environment(prefix, version, additional_dependencies):
        additional_dependencies = tuple(additional_dependencies)
        directory = helpers.environment_dir(_dir, version)

        env_dir = prefix.path(directory)
        # Remove a half-built env if anything below raises.
        with clean_path_on_failure(env_dir):
            if version != C.DEFAULT:
                python = norm_version(version)
            else:
                python = os.path.realpath(sys.executable)
            _make_venv(env_dir, python)
            with in_env(prefix, version):
                helpers.run_setup_cmd(
                    prefix, ('pip', 'install', '.') + additional_dependencies,
                )

    return in_env, healthy, run_hook, install_environment
def make_venv(envdir, python):
    """Create a virtualenv at ``envdir`` using the ``python`` binary."""
    # Never download setuptools/pip/wheel while creating the env.
    env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')
    # Run from `/` so the user's cwd cannot influence virtualenv.
    cmd_output(
        sys.executable, '-mvirtualenv', envdir, '-p', python,
        env=env, cwd='/',
    )
# Instantiate the shared python interface with this language's env dir
# and virtualenv factory, exposing the standard language API names.
_interface = py_interface(ENVIRONMENT_DIR, make_venv)
in_env, healthy, run_hook, install_environment = _interface
|
pre-commit/pre-commit | pre_commit/commands/install_uninstall.py | install | python | def install(
config_file, store,
overwrite=False, hooks=False, hook_type='pre-commit',
skip_on_missing_conf=False,
):
if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
logger.error(
'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
'hint: `git config --unset-all core.hooksPath`',
)
return 1
hook_path, legacy_path = _hook_paths(hook_type)
mkdirp(os.path.dirname(hook_path))
# If we have an existing hook, move it to pre-commit.legacy
if os.path.lexists(hook_path) and not is_our_script(hook_path):
shutil.move(hook_path, legacy_path)
# If we specify overwrite, we simply delete the legacy file
if overwrite and os.path.exists(legacy_path):
os.remove(legacy_path)
elif os.path.exists(legacy_path):
output.write_line(
'Running in migration mode with existing hooks at {}\n'
'Use -f to use only pre-commit.'.format(legacy_path),
)
params = {
'CONFIG': config_file,
'HOOK_TYPE': hook_type,
'INSTALL_PYTHON': sys.executable,
'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,
}
with io.open(hook_path, 'w') as hook_file:
contents = resource_text('hook-tmpl')
before, rest = contents.split(TEMPLATE_START)
to_template, after = rest.split(TEMPLATE_END)
before = before.replace('#!/usr/bin/env python3', shebang())
hook_file.write(before + TEMPLATE_START)
for line in to_template.splitlines():
var = line.split()[0]
hook_file.write('{} = {!r}\n'.format(var, params[var]))
hook_file.write(TEMPLATE_END + after)
make_executable(hook_path)
output.write_line('pre-commit installed at {}'.format(hook_path))
# If they requested we install all of the hooks, do so.
if hooks:
install_hooks(config_file, store)
return 0 | Install the pre-commit hooks. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/commands/install_uninstall.py#L69-L126 | [
"def cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the typ... | from __future__ import print_function
from __future__ import unicode_literals
import io
import itertools
import logging
import os.path
import shutil
import sys
from pre_commit import git
from pre_commit import output
from pre_commit.clientlib import load_config
from pre_commit.repository import all_hooks
from pre_commit.repository import install_hook_envs
from pre_commit.util import cmd_output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_text
logger = logging.getLogger(__name__)
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
'd8ee923c46731b42cd95cc869add4062',
'49fd668cb42069aa1b6048464be5d395',
'79f09a650522a87b0da915d0d983b2de',
'e358c9dae00eac5d06b38dfdb1e33a8c',
)
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
TEMPLATE_START = '# start templated\n'
TEMPLATE_END = '# end templated\n'
def _hook_paths(hook_type):
    """Return ``(hook_path, legacy_path)`` for ``hook_type`` in this repo."""
    base = os.path.join(git.get_git_dir(), 'hooks', hook_type)
    legacy = '{}.legacy'.format(base)
    return base, legacy
def is_our_script(filename):
    """Whether ``filename`` is a hook script that pre-commit installed."""
    if not os.path.exists(filename):  # pragma: windows no cover (symlink)
        return False
    with io.open(filename) as f:
        contents = f.read()
    # Recognize the current template hash and all historical ones.
    known_hashes = (CURRENT_HASH,) + PRIOR_HASHES
    return any(h in contents for h in known_hashes)
def shebang():
    """Compute the shebang line for the generated hook script."""
    if sys.platform == 'win32':
        return '#!/usr/bin/env {}'.format('python')
    # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
    search_dirs = [p for p in os.defpath.split(os.pathsep) if p]
    version_names = [
        'python{}'.format('.'.join(str(v) for v in sys.version_info[:i]))
        for i in range(3)
    ]
    # First existing (dir, name) combination wins; fall back to `python`.
    found = (
        exe
        for path, exe in itertools.product(search_dirs, version_names)
        if os.path.exists(os.path.join(path, exe))
    )
    py = next(found, 'python')
    return '#!/usr/bin/env {}'.format(py)
def install_hooks(config_file, store):
    """Eagerly build environments for every hook in ``config_file``."""
    install_hook_envs(all_hooks(load_config(config_file), store), store)
def uninstall(hook_type='pre-commit'):
    """Uninstall the pre-commit hooks."""
    hook_path, legacy_path = _hook_paths(hook_type)

    # Only remove a hook that exists *and* that we installed.
    if not (os.path.exists(hook_path) and is_our_script(hook_path)):
        return 0

    os.remove(hook_path)
    output.write_line('{} uninstalled'.format(hook_type))

    # Put back any hook we displaced at install time.
    if os.path.exists(legacy_path):
        os.rename(legacy_path, hook_path)
        output.write_line('Restored previous hooks to {}'.format(hook_path))

    return 0
|
pre-commit/pre-commit | pre_commit/commands/install_uninstall.py | uninstall | python | def uninstall(hook_type='pre-commit'):
hook_path, legacy_path = _hook_paths(hook_type)
# If our file doesn't exist or it isn't ours, gtfo.
if not os.path.exists(hook_path) or not is_our_script(hook_path):
return 0
os.remove(hook_path)
output.write_line('{} uninstalled'.format(hook_type))
if os.path.exists(legacy_path):
os.rename(legacy_path, hook_path)
output.write_line('Restored previous hooks to {}'.format(hook_path))
return 0 | Uninstall the pre-commit hooks. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/commands/install_uninstall.py#L133-L148 | [
"def write_line(s=None, stream=stdout_byte_stream, logfile_name=None):\n output_streams = [stream]\n if logfile_name:\n ctx = open(logfile_name, 'ab')\n output_streams.append(ctx)\n else:\n ctx = noop_context()\n\n with ctx:\n for output_stream in output_streams:\n ... | from __future__ import print_function
from __future__ import unicode_literals
import io
import itertools
import logging
import os.path
import shutil
import sys
from pre_commit import git
from pre_commit import output
from pre_commit.clientlib import load_config
from pre_commit.repository import all_hooks
from pre_commit.repository import install_hook_envs
from pre_commit.util import cmd_output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_text
logger = logging.getLogger(__name__)
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
'd8ee923c46731b42cd95cc869add4062',
'49fd668cb42069aa1b6048464be5d395',
'79f09a650522a87b0da915d0d983b2de',
'e358c9dae00eac5d06b38dfdb1e33a8c',
)
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
TEMPLATE_START = '# start templated\n'
TEMPLATE_END = '# end templated\n'
def _hook_paths(hook_type):
pth = os.path.join(git.get_git_dir(), 'hooks', hook_type)
return pth, '{}.legacy'.format(pth)
def is_our_script(filename):
if not os.path.exists(filename): # pragma: windows no cover (symlink)
return False
with io.open(filename) as f:
contents = f.read()
return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
def shebang():
if sys.platform == 'win32':
py = 'python'
else:
# Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`
path_choices = [p for p in os.defpath.split(os.pathsep) if p]
exe_choices = [
'python{}'.format('.'.join(str(v) for v in sys.version_info[:i]))
for i in range(3)
]
for path, exe in itertools.product(path_choices, exe_choices):
if os.path.exists(os.path.join(path, exe)):
py = exe
break
else:
py = 'python'
return '#!/usr/bin/env {}'.format(py)
def install(
        config_file, store,
        overwrite=False, hooks=False, hook_type='pre-commit',
        skip_on_missing_conf=False,
):
    """Install the pre-commit hooks.

    Writes the templated hook script into ``.git/hooks/<hook_type>``,
    preserving any pre-existing hook as ``<hook_type>.legacy`` (unless
    ``overwrite``).  Returns 0 on success, 1 when ``core.hooksPath``
    prevents installation.
    """
    # `core.hooksPath` redirects git away from .git/hooks, so anything we
    # write there would never run -- refuse rather than silently no-op.
    if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
        logger.error(
            'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
            'hint: `git config --unset-all core.hooksPath`',
        )
        return 1

    hook_path, legacy_path = _hook_paths(hook_type)

    mkdirp(os.path.dirname(hook_path))

    # If we have an existing hook, move it to pre-commit.legacy
    if os.path.lexists(hook_path) and not is_our_script(hook_path):
        shutil.move(hook_path, legacy_path)

    # If we specify overwrite, we simply delete the legacy file
    if overwrite and os.path.exists(legacy_path):
        os.remove(legacy_path)
    elif os.path.exists(legacy_path):
        output.write_line(
            'Running in migration mode with existing hooks at {}\n'
            'Use -f to use only pre-commit.'.format(legacy_path),
        )

    # Values substituted into the hook script's templated section.
    params = {
        'CONFIG': config_file,
        'HOOK_TYPE': hook_type,
        'INSTALL_PYTHON': sys.executable,
        'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,
    }

    with io.open(hook_path, 'w') as hook_file:
        contents = resource_text('hook-tmpl')
        before, rest = contents.split(TEMPLATE_START)
        to_template, after = rest.split(TEMPLATE_END)

        before = before.replace('#!/usr/bin/env python3', shebang())

        hook_file.write(before + TEMPLATE_START)
        for line in to_template.splitlines():
            # Each templated line is `NAME = ...`; rewrite with our value.
            var = line.split()[0]
            hook_file.write('{} = {!r}\n'.format(var, params[var]))
        hook_file.write(TEMPLATE_END + after)
    make_executable(hook_path)

    output.write_line('pre-commit installed at {}'.format(hook_path))

    # If they requested we install all of the hooks, do so.
    if hooks:
        install_hooks(config_file, store)

    return 0
def install_hooks(config_file, store):
install_hook_envs(all_hooks(load_config(config_file), store), store)
|
pre-commit/pre-commit | pre_commit/parse_shebang.py | normalize_cmd | python | def normalize_cmd(cmd):
# Use PATH to determine the executable
exe = normexe(cmd[0])
# Figure out the shebang from the resulting command
cmd = parse_filename(exe) + (exe,) + cmd[1:]
# This could have given us back another bare executable
exe = normexe(cmd[0])
return (exe,) + cmd[1:] | Fixes for the following issues on windows
- https://bugs.python.org/issue8557
- windows does not parse shebangs
This function also makes deep-path shebangs work just fine | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/parse_shebang.py#L62-L78 | [
"def parse_filename(filename):\n if not os.path.exists(filename):\n return ()\n else:\n return parse_shebang_from_file(filename)\n",
"def normexe(orig):\n def _error(msg):\n raise ExecutableNotFoundError('Executable `{}` {}'.format(orig, msg))\n\n if os.sep not in orig and (not os... | from __future__ import absolute_import
from __future__ import unicode_literals
import os.path
from identify.identify import parse_shebang_from_file
class ExecutableNotFoundError(OSError):
    """Raised when a hook's executable cannot be located."""

    def to_output(self):
        # Mirror the (returncode, stdout, stderr) shape of a hook run.
        message = self.args[0].encode('UTF-8')
        return (1, message, b'')
def parse_filename(filename):
    """Parsed shebang of ``filename``, or an empty tuple if it is absent."""
    if os.path.exists(filename):
        return parse_shebang_from_file(filename)
    return ()
def find_executable(exe, _environ=None):
    """Search PATH for ``exe``; return its full path or None.

    Names containing a path separator are returned (normalized) as-is.
    ``_environ`` lets tests substitute for ``os.environ``.
    """
    exe = os.path.normpath(exe)
    if os.sep in exe:
        return exe

    environ = os.environ if _environ is None else _environ

    # On windows, PATHEXT lists implicit executable suffixes (.EXE, ...).
    if 'PATHEXT' in environ:
        candidates = tuple(
            exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)
        ) + (exe,)
    else:
        candidates = (exe,)

    for directory in environ.get('PATH', '').split(os.pathsep):
        for candidate in candidates:
            full = os.path.join(directory, candidate)
            if os.path.isfile(full) and os.access(full, os.X_OK):
                return full
    return None
def normexe(orig):
    """Resolve ``orig`` to a runnable executable.

    Bare names are looked up on PATH; explicit paths are validated.
    Raises ExecutableNotFoundError when nothing runnable is found.
    """
    def _error(msg):
        raise ExecutableNotFoundError('Executable `{}` {}'.format(orig, msg))

    is_bare_name = os.sep not in orig and (
        not os.altsep or os.altsep not in orig
    )
    if is_bare_name:
        exe = find_executable(orig)
        if exe is None:
            _error('not found')
        return exe

    if not os.access(orig, os.X_OK):
        _error('not found')
    if os.path.isdir(orig):
        _error('is a directory')
    return orig
|
pre-commit/pre-commit | pre_commit/output.py | get_hook_message | python | def get_hook_message(
start,
postfix='',
end_msg=None,
end_len=0,
end_color=None,
use_color=None,
cols=80,
):
if bool(end_msg) == bool(end_len):
raise ValueError('Expected one of (`end_msg`, `end_len`)')
if end_msg is not None and (end_color is None or use_color is None):
raise ValueError(
'`end_color` and `use_color` are required with `end_msg`',
)
if end_len:
return start + '.' * (cols - len(start) - end_len - 1)
else:
return '{}{}{}{}\n'.format(
start,
'.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
postfix,
color.format_color(end_msg, end_color, use_color),
) | Prints a message for running a hook.
This currently supports three approaches:
# Print `start` followed by dots, leaving 6 characters at the end
>>> print_hook_message('start', end_len=6)
start...............................................................
# Print `start` followed by dots with the end message colored if coloring
# is specified and a newline afterwards
>>> print_hook_message(
'start',
end_msg='end',
end_color=color.RED,
use_color=True,
)
start...................................................................end
# Print `start` followed by dots, followed by the `postfix` message
# uncolored, followed by the `end_msg` colored if specified and a newline
# afterwards
>>> print_hook_message(
'start',
postfix='postfix ',
end_msg='end',
end_color=color.RED,
use_color=True,
)
start...........................................................postfix end | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/output.py#L10-L64 | null | from __future__ import unicode_literals
import sys
from pre_commit import color
from pre_commit import five
from pre_commit.util import noop_context
stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
def write(s, stream=stdout_byte_stream):
    """Write ``s`` to ``stream`` as bytes and flush immediately."""
    stream.write(five.to_bytes(s))
    stream.flush()
def write_line(s=None, stream=stdout_byte_stream, logfile_name=None):
    """Write ``s`` (or just a newline when None) to ``stream``.

    When ``logfile_name`` is given, the same bytes are also appended to
    that file.
    """
    output_streams = [stream]
    if logfile_name:
        ctx = open(logfile_name, 'ab')
        output_streams.append(ctx)
    else:
        # No logfile: a do-nothing context keeps the `with` below uniform.
        ctx = noop_context()

    with ctx:
        for output_stream in output_streams:
            if s is not None:
                output_stream.write(five.to_bytes(s))
            output_stream.write(b'\n')
            output_stream.flush()
|
pre-commit/pre-commit | pre_commit/git.py | check_for_cygwin_mismatch | python | def check_for_cygwin_mismatch():
if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
is_cygwin_python = sys.platform == 'cygwin'
toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
is_cygwin_git = toplevel.startswith('/')
if is_cygwin_python ^ is_cygwin_git:
exe_type = {True: '(cygwin)', False: '(windows)'}
logger.warn(
'pre-commit has detected a mix of cygwin python / git\n'
'This combination is not supported, it is likely you will '
'receive an error later in the program.\n'
'Make sure to use cygwin git+python while using cygwin\n'
'These can be installed through the cygwin installer.\n'
' - python {}\n'
' - git {}\n'.format(
exe_type[is_cygwin_python], exe_type[is_cygwin_git],
),
) | See https://github.com/pre-commit/pre-commit/issues/354 | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/git.py#L160-L179 | [
"def cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the typ... | from __future__ import unicode_literals
import logging
import os.path
import sys
from pre_commit.util import cmd_output
logger = logging.getLogger(__name__)
def zsplit(s):
    """Split a NUL-delimited string into parts.

    Leading/trailing NULs are dropped; an empty (or all-NUL) input yields
    an empty list.
    """
    stripped = s.strip('\0')
    return stripped.split('\0') if stripped else []
def no_git_env(_env=None):
    """Copy of the environment with troublesome ``GIT_*`` variables removed.

    Too many bugs dealing with environment variables and GIT:
    https://github.com/pre-commit/pre-commit/issues/300
    In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
    pre-commit hooks.  In git 1.9.1 (maybe others), git exports GIT_DIR
    and GIT_INDEX_FILE while running pre-commit hooks in submodules.
    GIT_DIR: Causes git clone to clone wrong thing
    GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
    """
    _env = os.environ if _env is None else _env
    keep = {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND'}
    return {
        k: v for k, v in _env.items()
        if not k.startswith('GIT_') or k in keep
    }
def get_root():
    """Absolute path of the repository's top-level directory."""
    return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
def get_git_dir(git_root='.'):
    """Return the (common) git directory for the repo at ``git_root``."""
    opts = ('--git-common-dir', '--git-dir')
    _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)
    for line, opt in zip(out.splitlines(), opts):
        if line != opt:  # pragma: no branch (git < 2.5)
            # git < 2.5 echoes an unsupported option back verbatim; the
            # first line that differs from its option is a real path.
            return os.path.normpath(os.path.join(git_root, line))
    else:
        raise AssertionError('unreachable: no git dir')
def get_remote_url(git_root):
    """``remote.origin.url`` configured for the repo at ``git_root``."""
    ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
    return ret.strip()
def is_in_merge_conflict():
    """Whether the repo is mid-merge (both MERGE_MSG and MERGE_HEAD exist)."""
    git_dir = get_git_dir('.')
    merge_msg = os.path.join(git_dir, 'MERGE_MSG')
    merge_head = os.path.join(git_dir, 'MERGE_HEAD')
    return os.path.exists(merge_msg) and os.path.exists(merge_head)
def parse_merge_msg_for_conflicts(merge_msg):
    """Extract conflicted filenames from a MERGE_MSG byte string.

    Conflicted files are the lines git indents with a tab
    ('#\\t' on git 2.4.1).
    """
    conflicts = []
    for line in merge_msg.splitlines():
        if line.startswith((b'\t', b'#\t')):
            conflicts.append(line.lstrip(b'#').strip().decode('UTF-8'))
    return conflicts
def get_conflicted_files():
    """Filenames involved in the current merge conflict (as a set)."""
    logger.info('Checking merge-conflict files only.')
    # Need to get the conflicted files from the MERGE_MSG because they could
    # have resolved the conflict by choosing one side or the other
    with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:
        merge_msg = f.read()
    merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)

    # This will get the rest of the changes made after the merge.
    # If they resolved the merge conflict by choosing a mesh of both sides
    # this will also include the conflicted files
    tree_hash = cmd_output('git', 'write-tree')[1].strip()
    merge_diff_filenames = zsplit(
        cmd_output(
            'git', 'diff', '--name-only', '--no-ext-diff', '-z',
            '-m', tree_hash, 'HEAD', 'MERGE_HEAD',
        )[1],
    )
    return set(merge_conflict_filenames) | set(merge_diff_filenames)
def get_staged_files(cwd=None):
    """Filenames staged in the index (NUL-delimited; deletions excluded)."""
    return zsplit(
        cmd_output(
            'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',
            # Everything except for D
            '--diff-filter=ACMRTUXB',
            cwd=cwd,
        )[1],
    )
def intent_to_add_files():
    """Filenames added with ``git add --intent-to-add``.

    Parses NUL-delimited ``git status --porcelain`` output; an 'A' in the
    second status column marks an intent-to-add entry.
    """
    _, stdout_binary, _ = cmd_output('git', 'status', '--porcelain', '-z')
    # Pop from the end of a reversed list to walk entries front-to-back.
    parts = list(reversed(zsplit(stdout_binary)))
    intent_to_add = []
    while parts:
        line = parts.pop()
        # Two status characters + a space, then the filename.
        status, filename = line[:3], line[3:]
        if status[0] in {'C', 'R'}:  # renames / moves have an additional arg
            parts.pop()
        if status[1] == 'A':
            intent_to_add.append(filename)
    return intent_to_add
def get_all_files():
    """Every filename tracked by git (NUL-delimited listing)."""
    return zsplit(cmd_output('git', 'ls-files', '-z')[1])
def get_changed_files(new, old):
    """Filenames changed between ``old`` and ``new`` (``...`` triple-dot diff)."""
    return zsplit(
        cmd_output(
            'git', 'diff', '--name-only', '--no-ext-diff', '-z',
            '{}...{}'.format(old, new),
        )[1],
    )
def head_rev(remote):
    """SHA of HEAD on ``remote`` (``--exit-code`` raises when absent)."""
    _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')
    return out.split()[0]
def has_diff(*args, **kwargs):
    """Whether ``git diff`` reports changes (truthy return code of --quiet).

    ``repo`` may be given as a keyword (default '.'); positional args are
    forwarded to ``git diff``.
    """
    repo = kwargs.pop('repo', '.')
    assert not kwargs, kwargs
    cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args
    return cmd_output(*cmd, cwd=repo, retcode=None)[0]
def commit(repo='.'):
    """Create an ``init`` commit in ``repo`` with a fixed pre-commit identity."""
    env = no_git_env()
    name, email = 'pre-commit', 'asottile+pre-commit@umich.edu'
    env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name
    env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email
    cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
    cmd_output(*cmd, cwd=repo, env=env)
def git_path(name, repo='.'):
    """Resolve ``name`` via ``git rev-parse --git-path``, relative to ``repo``."""
    _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)
    return os.path.join(repo, out.strip())
|
pre-commit/pre-commit | pre_commit/xargs.py | xargs | python | def xargs(cmd, varargs, **kwargs):
negate = kwargs.pop('negate', False)
target_concurrency = kwargs.pop('target_concurrency', 1)
max_length = kwargs.pop('_max_length', _get_platform_max_length())
retcode = 0
stdout = b''
stderr = b''
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
partitions = partition(cmd, varargs, target_concurrency, max_length)
def run_cmd_partition(run_cmd):
return cmd_output(*run_cmd, encoding=None, retcode=None, **kwargs)
threads = min(len(partitions), target_concurrency)
with _thread_mapper(threads) as thread_map:
results = thread_map(run_cmd_partition, partitions)
for proc_retcode, proc_out, proc_err in results:
# This is *slightly* too clever so I'll explain it.
# First the xor boolean table:
# T | F |
# +-------+
# T | F | T |
# --+-------+
# F | T | F |
# --+-------+
# When negate is True, it has the effect of flipping the return
# code. Otherwise, the returncode is unchanged.
retcode |= bool(proc_retcode) ^ negate
stdout += proc_out
stderr += proc_err
return retcode, stdout, stderr | A simplified implementation of xargs.
negate: Make nonzero successful and zero a failure
target_concurrency: Target number of partitions to run concurrently | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/xargs.py#L104-L146 | [
"def partition(cmd, varargs, target_concurrency, _max_length=None):\n _max_length = _max_length or _get_platform_max_length()\n\n # Generally, we try to partition evenly into at least `target_concurrency`\n # partitions, but we don't want a bunch of tiny partitions.\n max_args = max(4, math.ceil(len(var... | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import concurrent.futures
import contextlib
import math
import os
import sys
import six
from pre_commit import parse_shebang
from pre_commit.util import cmd_output
def _environ_size(_env=None):
environ = _env if _env is not None else getattr(os, 'environb', os.environ)
size = 8 * len(environ) # number of pointers in `envp`
for k, v in environ.items():
size += len(k) + len(v) + 2 # c strings in `envp`
return size
def _get_platform_max_length():  # pragma: no cover (platform specific)
    """Best-effort maximum command-line length for this platform."""
    if os.name == 'posix':
        # ARG_MAX minus headroom for the environment, clamped to
        # [4 KiB, 128 KiB].
        maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()
        maximum = max(min(maximum, 2 ** 17), 2 ** 12)
        return maximum
    elif os.name == 'nt':
        return 2 ** 15 - 2048  # UNICODE_STRING max - headroom
    else:
        # posix minimum
        return 2 ** 12
def _command_length(*cmd):
full_cmd = ' '.join(cmd)
# win32 uses the amount of characters, more details at:
# https://github.com/pre-commit/pre-commit/pull/839
if sys.platform == 'win32':
# the python2.x apis require bytes, we encode as UTF-8
if six.PY2:
return len(full_cmd.encode('utf-8'))
else:
return len(full_cmd.encode('utf-16le')) // 2
else:
return len(full_cmd.encode(sys.getfilesystemencoding()))
class ArgumentTooLongError(RuntimeError):
    # Raised when a single argument alone exceeds the maximum command length.
    pass
def partition(cmd, varargs, target_concurrency, _max_length=None):
    """Split ``varargs`` into per-invocation argument tuples for ``cmd``.

    Aims for at least ``target_concurrency`` partitions while keeping each
    command line under ``_max_length``.  Raises ArgumentTooLongError when a
    single argument cannot fit at all.
    """
    _max_length = _max_length or _get_platform_max_length()

    # Generally, we try to partition evenly into at least `target_concurrency`
    # partitions, but we don't want a bunch of tiny partitions.
    max_args = max(4, math.ceil(len(varargs) / target_concurrency))

    cmd = tuple(cmd)
    ret = []

    ret_cmd = []
    # Reversed so arguments are in order
    varargs = list(reversed(varargs))

    total_length = _command_length(*cmd) + 1
    while varargs:
        arg = varargs.pop()

        arg_length = _command_length(arg) + 1
        if (
                total_length + arg_length <= _max_length and
                len(ret_cmd) < max_args
        ):
            ret_cmd.append(arg)
            total_length += arg_length
        elif not ret_cmd:
            raise ArgumentTooLongError(arg)
        else:
            # We've exceeded the length, yield a command
            ret.append(cmd + tuple(ret_cmd))
            ret_cmd = []
            total_length = _command_length(*cmd) + 1
            # Put the argument back so the next partition picks it up.
            varargs.append(arg)

    ret.append(cmd + tuple(ret_cmd))

    return tuple(ret)
@contextlib.contextmanager
def _thread_mapper(maxsize):
if maxsize == 1:
yield map
else:
with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:
yield ex.map
|
pre-commit/pre-commit | pre_commit/make_archives.py | make_archive | python | def make_archive(name, repo, ref, destdir):
output_path = os.path.join(destdir, name + '.tar.gz')
with tmpdir() as tempdir:
# Clone the repository to the temporary directory
cmd_output('git', 'clone', repo, tempdir)
cmd_output('git', 'checkout', ref, cwd=tempdir)
# We don't want the '.git' directory
# It adds a bunch of size to the archive and we don't use it at
# runtime
rmtree(os.path.join(tempdir, '.git'))
with tarfile.open(output_path, 'w|gz') as tf:
tf.add(tempdir, name)
return output_path | Makes an archive of a repository in the given destdir.
:param text name: Name to give the archive. For instance foo. The file
that is created will be called foo.tar.gz.
:param text repo: Repository to clone.
:param text ref: Tag/SHA/branch to check out.
:param text destdir: Directory to place archives in. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/make_archives.py#L30-L53 | [
"def rmtree(path):\n \"\"\"On windows, rmtree fails for readonly dirs.\"\"\"\n def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n excvalue = exc[1]\n if (\n func in (os.rmdir, os.remove, os.unlink) and\n excvalue.errno == errno.EACCES\n ... | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os.path
import tarfile
from pre_commit import output
from pre_commit.util import cmd_output
from pre_commit.util import rmtree
from pre_commit.util import tmpdir
# This is a script for generating the tarred resources for git repo
# dependencies. Currently it's just for "vendoring" ruby support packages.
REPOS = (
('rbenv', 'git://github.com/rbenv/rbenv', 'e60ad4a'),
('ruby-build', 'git://github.com/rbenv/ruby-build', '9bc9971'),
(
'ruby-download',
'git://github.com/garnieretienne/rvm-download',
'09bd7c6',
),
)
def main(argv=None):
    """CLI entry point: regenerate each vendored archive into ``--dest``."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dest', default='pre_commit/resources')
    args = parser.parse_args(argv)
    for archive_name, repo, ref in REPOS:
        output.write_line(
            'Making {}.tar.gz for {}@{}'.format(archive_name, repo, ref),
        )
        make_archive(archive_name, repo, ref, args.dest)
if __name__ == '__main__':
exit(main())
|
pre-commit/pre-commit | pre_commit/commands/run.py | _compute_cols | python | def _compute_cols(hooks, verbose):
if hooks:
name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)
else:
name_len = 0
cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)
return max(cols, 80) | Compute the number of columns to display hook messages. The widest
that will be displayed is in the no files skipped case:
Hook name...(no files to check) Skipped
or in the verbose case
Hook name [hookid]...(no files to check) Skipped | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/commands/run.py#L171-L187 | null | from __future__ import unicode_literals
import logging
import os
import re
import subprocess
import sys
from identify.identify import tags_from_path
from pre_commit import color
from pre_commit import git
from pre_commit import output
from pre_commit.clientlib import load_config
from pre_commit.output import get_hook_message
from pre_commit.repository import all_hooks
from pre_commit.repository import install_hook_envs
from pre_commit.staged_files_only import staged_files_only
from pre_commit.util import cmd_output
from pre_commit.util import noop_context
logger = logging.getLogger('pre_commit')
def filter_by_include_exclude(names, include, exclude):
    """Keep names matching the ``include`` regex but not ``exclude``."""
    include_pattern = re.compile(include)
    exclude_pattern = re.compile(exclude)
    return [
        name for name in names
        if include_pattern.search(name) and not exclude_pattern.search(name)
    ]
class Classifier(object):
    """Classifies a fixed set of filenames by their `identify` tags."""

    def __init__(self, filenames):
        # Drop names that no longer exist on disk; lexists keeps broken
        # symlinks (hooks may want to check them).
        self.filenames = [f for f in filenames if os.path.lexists(f)]
        self._types_cache = {}

    def _types_for_file(self, filename):
        # Memoized tags_from_path -- all hooks share one Classifier, so
        # each file is only identified once per run.
        try:
            return self._types_cache[filename]
        except KeyError:
            ret = self._types_cache[filename] = tags_from_path(filename)
            return ret

    def by_types(self, names, types, exclude_types):
        """Names whose tags include all of ``types`` and none of
        ``exclude_types``."""
        types, exclude_types = frozenset(types), frozenset(exclude_types)
        ret = []
        for filename in names:
            tags = self._types_for_file(filename)
            if tags >= types and not tags & exclude_types:
                ret.append(filename)
        return ret

    def filenames_for_hook(self, hook):
        """The subset of filenames this hook should run on."""
        names = self.filenames
        names = filter_by_include_exclude(names, hook.files, hook.exclude)
        names = self.by_types(names, hook.types, hook.exclude_types)
        return names
def _get_skips(environ):
skips = environ.get('SKIP', '')
return {skip.strip() for skip in skips.split(',') if skip.strip()}
def _hook_msg_start(hook, verbose):
return '{}{}'.format('[{}] '.format(hook.id) if verbose else '', hook.name)
SKIPPED = 'Skipped'
NO_FILES = '(no files to check)'
def _run_single_hook(classifier, hook, args, skips, cols):
    """Run one hook, print its status line, and return 0 (pass) or 1 (fail).

    A hook fails when it returns nonzero *or* when it modifies files
    (detected by diffing the working tree before and after).
    """
    filenames = classifier.filenames_for_hook(hook)

    if hook.language == 'pcre':
        logger.warning(
            '`{}` (from {}) uses the deprecated pcre language.\n'
            'The pcre language is scheduled for removal in pre-commit 2.x.\n'
            'The pygrep language is a more portable (and usually drop-in) '
            'replacement.'.format(hook.id, hook.src),
        )

    if hook.id in skips or hook.alias in skips:
        output.write(
            get_hook_message(
                _hook_msg_start(hook, args.verbose),
                end_msg=SKIPPED,
                end_color=color.YELLOW,
                use_color=args.color,
                cols=cols,
            ),
        )
        return 0
    elif not filenames and not hook.always_run:
        output.write(
            get_hook_message(
                _hook_msg_start(hook, args.verbose),
                postfix=NO_FILES,
                end_msg=SKIPPED,
                end_color=color.TURQUOISE,
                use_color=args.color,
                cols=cols,
            ),
        )
        return 0

    # Print the hook and the dots first in case the hook takes hella long to
    # run.
    output.write(
        get_hook_message(
            _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,
        ),
    )
    sys.stdout.flush()

    diff_before = cmd_output(
        'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,
    )
    retcode, stdout, stderr = hook.run(
        tuple(filenames) if hook.pass_filenames else (),
    )
    diff_after = cmd_output(
        'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,
    )

    file_modifications = diff_before != diff_after

    # If the hook makes changes, fail the commit
    if file_modifications:
        retcode = 1

    if retcode:
        retcode = 1
        print_color = color.RED
        pass_fail = 'Failed'
    else:
        retcode = 0
        print_color = color.GREEN
        pass_fail = 'Passed'

    output.write_line(color.format_color(pass_fail, print_color, args.color))

    # Show details on failure, or always in verbose mode.
    if (
            (stdout or stderr or file_modifications) and
            (retcode or args.verbose or hook.verbose)
    ):
        output.write_line('hookid: {}\n'.format(hook.id))

        # Print a message if failing due to file modifications
        if file_modifications:
            output.write('Files were modified by this hook.')

            if stdout or stderr:
                output.write_line(' Additional output:')

            output.write_line()

        for out in (stdout, stderr):
            assert type(out) is bytes, type(out)
            if out.strip():
                output.write_line(out.strip(), logfile_name=hook.log_file)
        output.write_line()

    return retcode
def _all_filenames(args):
    """Return the filenames the hooks should run against.

    Selection precedence: an origin/source range (push-style runs), the
    commit message file for message stages, explicit --files, --all-files,
    conflicted files during a merge, then the staged files.
    """
    if args.origin and args.source:
        return git.get_changed_files(args.origin, args.source)
    elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:
        # Message-stage hooks operate on the commit message file, not a diff.
        return (args.commit_msg_filename,)
    elif args.files:
        return args.files
    elif args.all_files:
        return git.get_all_files()
    elif git.is_in_merge_conflict():
        return git.get_conflicted_files()
    else:
        return git.get_staged_files()
def _run_hooks(config, hooks, args, environ):
    """Actually run the hooks."""
    # Hook ids/aliases listed in the SKIP environment variable.
    skips = _get_skips(environ)
    cols = _compute_cols(hooks, args.verbose)
    filenames = _all_filenames(args)
    # Only the top-level `exclude` is applied here; per-hook include/exclude
    # patterns are applied later by the classifier.
    filenames = filter_by_include_exclude(filenames, '', config['exclude'])
    classifier = Classifier(filenames)
    retval = 0
    for hook in hooks:
        retval |= _run_single_hook(classifier, hook, args, skips, cols)
        if retval and config['fail_fast']:
            break
    if retval and args.show_diff_on_failure and git.has_diff():
        if args.all_files:
            output.write_line(
                'pre-commit hook(s) made changes.\n'
                'If you are seeing this message in CI, '
                'reproduce locally with: `pre-commit run --all-files`.\n'
                'To run `pre-commit` as part of git workflow, use '
                '`pre-commit install`.',
            )
        output.write_line('All changes made by hooks:')
        subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))
    return retval
def _has_unmerged_paths():
    """Return True when `git ls-files --unmerged` lists any paths."""
    stdout = cmd_output('git', 'ls-files', '--unmerged')[1]
    return bool(stdout.strip())
def _has_unstaged_config(config_file):
    """Return True if `config_file` differs from its staged version."""
    retcode, _, _ = cmd_output(
        'git', 'diff', '--no-ext-diff', '--exit-code', config_file,
        retcode=None,
    )
    # be explicit, other git errors don't mean it has an unstaged config.
    return retcode == 1
def run(config_file, store, args, environ=os.environ):
    """Entry point for `pre-commit run`: validate state, select and run hooks.

    Returns a non-zero exit code on validation failure or hook failure.
    NOTE: the mutable default `environ=os.environ` is intentional -- it is
    mutated below so hooks see PRE_COMMIT_ORIGIN / PRE_COMMIT_SOURCE.
    """
    # With --all-files or explicit --files, the run is not against the
    # staging area, so unstaged changes need not be stashed.
    no_stash = args.all_files or bool(args.files)
    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1
    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1
    if _has_unstaged_config(config_file) and not no_stash:
        logger.error(
            'Your pre-commit configuration is unstaged.\n'
            '`git add {}` to fix this.'.format(config_file),
        )
        return 1
    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source
    if no_stash:
        ctx = noop_context()
    else:
        ctx = staged_files_only(store.directory)
    with ctx:
        config = load_config(config_file)
        # Keep hooks matching the requested id/alias (if any) and stage.
        hooks = [
            hook
            for hook in all_hooks(config, store)
            if not args.hook or hook.id == args.hook or hook.alias == args.hook
            if args.hook_stage in hook.stages
        ]
        if args.hook and not hooks:
            output.write_line('No hook with id `{}`'.format(args.hook))
            return 1
        install_hook_envs(hooks, store)
        return _run_hooks(config, hooks, args, environ)
|
pre-commit/pre-commit | pre_commit/commands/run.py | _run_hooks | python | def _run_hooks(config, hooks, args, environ):
skips = _get_skips(environ)
cols = _compute_cols(hooks, args.verbose)
filenames = _all_filenames(args)
filenames = filter_by_include_exclude(filenames, '', config['exclude'])
classifier = Classifier(filenames)
retval = 0
for hook in hooks:
retval |= _run_single_hook(classifier, hook, args, skips, cols)
if retval and config['fail_fast']:
break
if retval and args.show_diff_on_failure and git.has_diff():
if args.all_files:
output.write_line(
'pre-commit hook(s) made changes.\n'
'If you are seeing this message in CI, '
'reproduce locally with: `pre-commit run --all-files`.\n'
'To run `pre-commit` as part of git workflow, use '
'`pre-commit install`.',
)
output.write_line('All changes made by hooks:')
subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))
return retval | Actually run the hooks. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/commands/run.py#L205-L228 | null | from __future__ import unicode_literals
import logging
import os
import re
import subprocess
import sys
from identify.identify import tags_from_path
from pre_commit import color
from pre_commit import git
from pre_commit import output
from pre_commit.clientlib import load_config
from pre_commit.output import get_hook_message
from pre_commit.repository import all_hooks
from pre_commit.repository import install_hook_envs
from pre_commit.staged_files_only import staged_files_only
from pre_commit.util import cmd_output
from pre_commit.util import noop_context
logger = logging.getLogger('pre_commit')
def filter_by_include_exclude(names, include, exclude):
    """Keep the names matching `include` and not matching `exclude`.

    Both arguments are regex patterns applied with `re.search`.
    """
    keep = re.compile(include).search
    drop = re.compile(exclude).search
    return [name for name in names if keep(name) and not drop(name)]
class Classifier(object):
    """Classifies a fixed set of filenames by their `identify` tags.

    Used to compute, per hook, which filenames the hook should receive
    based on its include/exclude patterns and type filters.
    """
    def __init__(self, filenames):
        # Drop paths that no longer exist; lexists keeps broken symlinks.
        self.filenames = [f for f in filenames if os.path.lexists(f)]
        # Maps filename -> tag set from tags_from_path, computed lazily.
        self._types_cache = {}
    def _types_for_file(self, filename):
        # EAFP cache: hit is the common case after the first lookup.
        try:
            return self._types_cache[filename]
        except KeyError:
            ret = self._types_cache[filename] = tags_from_path(filename)
            return ret
    def by_types(self, names, types, exclude_types):
        """Keep names whose tags are a superset of `types` and disjoint
        from `exclude_types`."""
        types, exclude_types = frozenset(types), frozenset(exclude_types)
        ret = []
        for filename in names:
            tags = self._types_for_file(filename)
            if tags >= types and not tags & exclude_types:
                ret.append(filename)
        return ret
    def filenames_for_hook(self, hook):
        """Return this run's filenames filtered for one hook."""
        names = self.filenames
        names = filter_by_include_exclude(names, hook.files, hook.exclude)
        names = self.by_types(names, hook.types, hook.exclude_types)
        return names
def _get_skips(environ):
skips = environ.get('SKIP', '')
return {skip.strip() for skip in skips.split(',') if skip.strip()}
def _hook_msg_start(hook, verbose):
return '{}{}'.format('[{}] '.format(hook.id) if verbose else '', hook.name)
SKIPPED = 'Skipped'
NO_FILES = '(no files to check)'
def _run_single_hook(classifier, hook, args, skips, cols):
    """Run one hook against its matching filenames; return 0 pass, 1 fail.

    `skips` is the set of hook ids/aliases from the SKIP env var and
    `cols` is the display width computed by _compute_cols.
    """
    filenames = classifier.filenames_for_hook(hook)
    if hook.language == 'pcre':
        logger.warning(
            '`{}` (from {}) uses the deprecated pcre language.\n'
            'The pcre language is scheduled for removal in pre-commit 2.x.\n'
            'The pygrep language is a more portable (and usually drop-in) '
            'replacement.'.format(hook.id, hook.src),
        )
    if hook.id in skips or hook.alias in skips:
        output.write(
            get_hook_message(
                _hook_msg_start(hook, args.verbose),
                end_msg=SKIPPED,
                end_color=color.YELLOW,
                use_color=args.color,
                cols=cols,
            ),
        )
        return 0
    elif not filenames and not hook.always_run:
        output.write(
            get_hook_message(
                _hook_msg_start(hook, args.verbose),
                postfix=NO_FILES,
                end_msg=SKIPPED,
                end_color=color.TURQUOISE,
                use_color=args.color,
                cols=cols,
            ),
        )
        return 0
    # Print the hook and the dots first in case the hook takes hella long to
    # run.
    output.write(
        get_hook_message(
            _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,
        ),
    )
    sys.stdout.flush()
    # Snapshot the working-tree diff before and after running so hooks that
    # modify files can be detected (encoding=None: compare raw bytes).
    diff_before = cmd_output(
        'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,
    )
    retcode, stdout, stderr = hook.run(
        tuple(filenames) if hook.pass_filenames else (),
    )
    diff_after = cmd_output(
        'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,
    )
    file_modifications = diff_before != diff_after
    # If the hook makes changes, fail the commit
    if file_modifications:
        retcode = 1
    # Normalize any non-zero code to 1 and pick the status line styling.
    if retcode:
        retcode = 1
        print_color = color.RED
        pass_fail = 'Failed'
    else:
        retcode = 0
        print_color = color.GREEN
        pass_fail = 'Passed'
    output.write_line(color.format_color(pass_fail, print_color, args.color))
    if (
        (stdout or stderr or file_modifications) and
        (retcode or args.verbose or hook.verbose)
    ):
        output.write_line('hookid: {}\n'.format(hook.id))
        # Print a message if failing due to file modifications
        if file_modifications:
            output.write('Files were modified by this hook.')
            if stdout or stderr:
                output.write_line(' Additional output:')
            output.write_line()
        for out in (stdout, stderr):
            # hook.run is expected to yield raw bytes for both streams.
            assert type(out) is bytes, type(out)
            if out.strip():
                output.write_line(out.strip(), logfile_name=hook.log_file)
        output.write_line()
    return retcode
def _compute_cols(hooks, verbose):
    """Compute the number of columns to display hook messages.  The widest
    that will be displayed is in the no files skipped case:

        Hook name...(no files to check) Skipped

    or in the verbose case

        Hook name [hookid]...(no files to check) Skipped
    """
    widths = [len(_hook_msg_start(hook, verbose)) for hook in hooks]
    name_len = max(widths) if widths else 0
    # name + '...' + postfix + ' ' + end message; at least 80 columns.
    return max(name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED), 80)
def _all_filenames(args):
if args.origin and args.source:
return git.get_changed_files(args.origin, args.source)
elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:
return (args.commit_msg_filename,)
elif args.files:
return args.files
elif args.all_files:
return git.get_all_files()
elif git.is_in_merge_conflict():
return git.get_conflicted_files()
else:
return git.get_staged_files()
def _has_unmerged_paths():
    """Return True if git reports unresolved merge-conflict entries."""
    _, stdout, _ = cmd_output('git', 'ls-files', '--unmerged')
    return bool(stdout.strip())
def _has_unstaged_config(config_file):
    """Return True when the config file has unstaged modifications."""
    retcode = cmd_output(
        'git', 'diff', '--no-ext-diff', '--exit-code', config_file,
        retcode=None,
    )[0]
    # `git diff --exit-code` returns 1 for "differences found"; any other
    # non-zero code is a different git failure, not an unstaged config.
    return retcode == 1
def run(config_file, store, args, environ=os.environ):
    """Entry point for `pre-commit run`: validate state, select and run hooks.

    Returns a non-zero exit code on validation failure or hook failure.
    NOTE: the mutable default `environ=os.environ` is intentional -- it is
    mutated below so hooks see PRE_COMMIT_ORIGIN / PRE_COMMIT_SOURCE.
    """
    # With --all-files or explicit --files, the run is not against the
    # staging area, so unstaged changes need not be stashed.
    no_stash = args.all_files or bool(args.files)
    # Check if we have unresolved merge conflict files and fail fast.
    if _has_unmerged_paths():
        logger.error('Unmerged files. Resolve before committing.')
        return 1
    if bool(args.source) != bool(args.origin):
        logger.error('Specify both --origin and --source.')
        return 1
    if _has_unstaged_config(config_file) and not no_stash:
        logger.error(
            'Your pre-commit configuration is unstaged.\n'
            '`git add {}` to fix this.'.format(config_file),
        )
        return 1
    # Expose origin / source as environment variables for hooks to consume
    if args.origin and args.source:
        environ['PRE_COMMIT_ORIGIN'] = args.origin
        environ['PRE_COMMIT_SOURCE'] = args.source
    if no_stash:
        ctx = noop_context()
    else:
        ctx = staged_files_only(store.directory)
    with ctx:
        config = load_config(config_file)
        # Keep hooks matching the requested id/alias (if any) and stage.
        hooks = [
            hook
            for hook in all_hooks(config, store)
            if not args.hook or hook.id == args.hook or hook.alias == args.hook
            if args.hook_stage in hook.stages
        ]
        if args.hook and not hooks:
            output.write_line('No hook with id `{}`'.format(args.hook))
            return 1
        install_hook_envs(hooks, store)
        return _run_hooks(config, hooks, args, environ)
|
pre-commit/pre-commit | pre_commit/util.py | rmtree | python | def rmtree(path):
def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
excvalue = exc[1]
if (
func in (os.rmdir, os.remove, os.unlink) and
excvalue.errno == errno.EACCES
):
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly) | On windows, rmtree fails for readonly dirs. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/util.py#L159-L171 | null | from __future__ import unicode_literals
import contextlib
import errno
import os.path
import shutil
import stat
import subprocess
import sys
import tempfile
import six
from pre_commit import five
from pre_commit import parse_shebang
if sys.version_info >= (3, 7): # pragma: no cover (PY37+)
from importlib.resources import open_binary
from importlib.resources import read_text
else: # pragma: no cover (<PY37)
from importlib_resources import open_binary
from importlib_resources import read_text
def mkdirp(path):
    """Create `path` and any missing parents; no-op if it already exists."""
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError:
        # Lost a race with another process creating it -- fine; anything
        # else (permissions, bad path) propagates.
        if not os.path.exists(path):
            raise
@contextlib.contextmanager
def clean_path_on_failure(path):
    """Cleans up the directory on an exceptional failure."""
    try:
        yield
    except BaseException:
        # BaseException so KeyboardInterrupt / SystemExit also clean up.
        if os.path.exists(path):
            rmtree(path)
        raise
@contextlib.contextmanager
def noop_context():
    """A context manager that does nothing; a stand-in for a real one."""
    yield
@contextlib.contextmanager
def tmpdir():
    """Contextmanager to create a temporary directory. It will be cleaned up
    afterwards.
    """
    tempdir = tempfile.mkdtemp()
    try:
        yield tempdir
    finally:
        # Use the local rmtree (not shutil.rmtree) so readonly files on
        # windows are handled.
        rmtree(tempdir)
def resource_bytesio(filename):
    """Open the named packaged resource file as a binary stream."""
    return open_binary('pre_commit.resources', filename)
def resource_text(filename):
    """Return the named packaged resource file's contents as text."""
    return read_text('pre_commit.resources', filename)
def make_executable(filename):
    """Set the user/group/other executable bits on `filename`."""
    mode = os.stat(filename).st_mode
    os.chmod(filename, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
class CalledProcessError(RuntimeError):
    """Raised by cmd_output when a command exits with an unexpected code."""
    def __init__(self, returncode, cmd, expected_returncode, output=None):
        super(CalledProcessError, self).__init__(
            returncode, cmd, expected_returncode, output,
        )
        self.returncode = returncode
        self.cmd = cmd
        self.expected_returncode = expected_returncode
        self.output = output  # (stdout, stderr) tuple or None
    def to_bytes(self):
        """Render a human-readable description of the failure as bytes."""
        output = []
        for maybe_text in self.output:
            if maybe_text:
                output.append(
                    b'\n ' +
                    five.to_bytes(maybe_text).replace(b'\n', b'\n '),
                )
            else:
                output.append(b'(none)')
        return b''.join((
            five.to_bytes(
                'Command: {!r}\n'
                'Return code: {}\n'
                'Expected return code: {}\n'.format(
                    self.cmd, self.returncode, self.expected_returncode,
                ),
            ),
            b'Output: ', output[0], b'\n',
            b'Errors: ', output[1], b'\n',
        ))
    def to_text(self):
        """Render the failure description as text."""
        return self.to_bytes().decode('UTF-8')
    # Wire str/bytes dunders per interpreter version (via six).
    if six.PY2: # pragma: no cover (py2)
        __str__ = to_bytes
        __unicode__ = to_text
    else: # pragma: no cover (py3)
        __bytes__ = to_bytes
        __str__ = to_text
def cmd_output(*cmd, **kwargs):
    """Run `cmd` and return `(returncode, stdout, stderr)`.

    Keyword args:
        retcode - expected return code; CalledProcessError is raised on a
            mismatch.  Pass None to accept any return code.
        encoding - decode stdout/stderr with this encoding (default UTF-8);
            pass None to get raw bytes.
    Remaining kwargs are forwarded to subprocess.Popen.
    """
    retcode = kwargs.pop('retcode', 0)
    encoding = kwargs.pop('encoding', 'UTF-8')
    popen_kwargs = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }
    # py2/py3 on windows are more strict about the types here
    cmd = tuple(five.n(arg) for arg in cmd)
    kwargs['env'] = {
        five.n(key): five.n(value)
        for key, value in kwargs.pop('env', {}).items()
    } or None
    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        # Report the missing executable as output instead of raising here;
        # the retcode check below decides whether to raise.
        returncode, stdout, stderr = e.to_output()
    else:
        popen_kwargs.update(kwargs)
        proc = subprocess.Popen(cmd, **popen_kwargs)
        stdout, stderr = proc.communicate()
        returncode = proc.returncode
    if encoding is not None and stdout is not None:
        stdout = stdout.decode(encoding)
    if encoding is not None and stderr is not None:
        stderr = stderr.decode(encoding)
    if retcode is not None and retcode != returncode:
        raise CalledProcessError(
            returncode, cmd, retcode, output=(stdout, stderr),
        )
    return returncode, stdout, stderr
def parse_version(s):
    """Parse a dotted version string into a tuple of ints for comparison.

    A "poor man's" parser: only purely numeric components are supported.
    """
    components = s.split('.')
    return tuple(map(int, components))
|
pre-commit/pre-commit | pre_commit/commands/autoupdate.py | _update_repo | python | def _update_repo(repo_config, store, tags_only):
repo_path = store.clone(repo_config['repo'], repo_config['rev'])
cmd_output('git', 'fetch', cwd=repo_path)
tag_cmd = ('git', 'describe', 'origin/master', '--tags')
if tags_only:
tag_cmd += ('--abbrev=0',)
else:
tag_cmd += ('--exact',)
try:
rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
except CalledProcessError:
tag_cmd = ('git', 'rev-parse', 'origin/master')
rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
# Don't bother trying to update if our rev is the same
if rev == repo_config['rev']:
return repo_config
try:
path = store.clone(repo_config['repo'], rev)
manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
except InvalidManifestError as e:
raise RepositoryCannotBeUpdatedError(six.text_type(e))
# See if any of our hooks were deleted with the new commits
hooks = {hook['id'] for hook in repo_config['hooks']}
hooks_missing = hooks - {hook['id'] for hook in manifest}
if hooks_missing:
raise RepositoryCannotBeUpdatedError(
'Cannot update because the tip of master is missing these hooks:\n'
'{}'.format(', '.join(sorted(hooks_missing))),
)
# Construct a new config with the head rev
new_config = repo_config.copy()
new_config['rev'] = rev
return new_config | Updates a repository to the tip of `master`. If the repository cannot
be updated because a hook that is configured does not exist in `master`,
this raises a RepositoryCannotBeUpdatedError
Args:
repo_config - A config for a repository | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/commands/autoupdate.py#L29-L73 | [
"def cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the typ... | from __future__ import print_function
from __future__ import unicode_literals
import os.path
import re
import six
from aspy.yaml import ordered_dump
from aspy.yaml import ordered_load
from cfgv import remove_defaults
import pre_commit.constants as C
from pre_commit import output
from pre_commit.clientlib import CONFIG_SCHEMA
from pre_commit.clientlib import InvalidManifestError
from pre_commit.clientlib import load_config
from pre_commit.clientlib import load_manifest
from pre_commit.clientlib import LOCAL
from pre_commit.clientlib import META
from pre_commit.commands.migrate_config import migrate_config
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
class RepositoryCannotBeUpdatedError(RuntimeError):
pass
REV_LINE_RE = re.compile(r'^(\s+)rev:(\s*)([^\s#]+)(.*)$', re.DOTALL)
REV_LINE_FMT = '{}rev:{}{}{}'
def _write_new_config_file(path, output):
    """Write the updated config to `path`, preserving original formatting.

    Splices only the new `rev:` values into the original file's lines;
    if the spliced result no longer round-trips to the intended config,
    falls back to a full pretty-printed yaml dump.
    """
    with open(path) as f:
        original_contents = f.read()
    output = remove_defaults(output, CONFIG_SCHEMA)
    new_contents = ordered_dump(output, **C.YAML_DUMP_KWARGS)
    lines = original_contents.splitlines(True)
    # Indices of the original `rev:` lines, reversed so pop() yields them
    # in document order.
    rev_line_indices_reversed = list(
        reversed([
            i for i, line in enumerate(lines) if REV_LINE_RE.match(line)
        ]),
    )
    for line in new_contents.splitlines(True):
        if REV_LINE_RE.match(line):
            # It's possible we didn't identify the rev lines in the original
            if not rev_line_indices_reversed:
                break
            line_index = rev_line_indices_reversed.pop()
            original_line = lines[line_index]
            orig_match = REV_LINE_RE.match(original_line)
            new_match = REV_LINE_RE.match(line)
            # Keep the original whitespace/comment; swap in the new rev.
            lines[line_index] = REV_LINE_FMT.format(
                orig_match.group(1), orig_match.group(2),
                new_match.group(3), orig_match.group(4),
            )
    # If we failed to intelligently rewrite the rev lines, fall back to the
    # pretty-formatted yaml output
    to_write = ''.join(lines)
    if remove_defaults(ordered_load(to_write), CONFIG_SCHEMA) != output:
        to_write = new_contents
    with open(path, 'w') as f:
        f.write(to_write)
def autoupdate(config_file, store, tags_only, repos=()):
    """Auto-update the pre-commit config to the latest versions of repos.

    Returns 0 on success, 1 when any repository could not be updated.
    When `repos` is non-empty, only those repository urls are updated.
    """
    migrate_config(config_file, quiet=True)
    retv = 0
    output_repos = []
    changed = False
    input_config = load_config(config_file)
    for repo_config in input_config['repos']:
        if (
            repo_config['repo'] in {LOCAL, META} or
            # Skip updating any repo_configs that aren't for the specified repo
            repos and repo_config['repo'] not in repos
        ):
            output_repos.append(repo_config)
            continue
        output.write('Updating {}...'.format(repo_config['repo']))
        try:
            new_repo_config = _update_repo(repo_config, store, tags_only)
        except RepositoryCannotBeUpdatedError as error:
            # Keep the old config for this repo and record the failure.
            output.write_line(error.args[0])
            output_repos.append(repo_config)
            retv = 1
            continue
        if new_repo_config['rev'] != repo_config['rev']:
            changed = True
            output.write_line(
                'updating {} -> {}.'.format(
                    repo_config['rev'], new_repo_config['rev'],
                ),
            )
            output_repos.append(new_repo_config)
        else:
            output.write_line('already up to date.')
            output_repos.append(repo_config)
    # Only rewrite the file when at least one rev actually changed.
    if changed:
        output_config = input_config.copy()
        output_config['repos'] = output_repos
        _write_new_config_file(config_file, output_config)
    return retv
|
pre-commit/pre-commit | pre_commit/commands/autoupdate.py | autoupdate | python | def autoupdate(config_file, store, tags_only, repos=()):
migrate_config(config_file, quiet=True)
retv = 0
output_repos = []
changed = False
input_config = load_config(config_file)
for repo_config in input_config['repos']:
if (
repo_config['repo'] in {LOCAL, META} or
# Skip updating any repo_configs that aren't for the specified repo
repos and repo_config['repo'] not in repos
):
output_repos.append(repo_config)
continue
output.write('Updating {}...'.format(repo_config['repo']))
try:
new_repo_config = _update_repo(repo_config, store, tags_only)
except RepositoryCannotBeUpdatedError as error:
output.write_line(error.args[0])
output_repos.append(repo_config)
retv = 1
continue
if new_repo_config['rev'] != repo_config['rev']:
changed = True
output.write_line(
'updating {} -> {}.'.format(
repo_config['rev'], new_repo_config['rev'],
),
)
output_repos.append(new_repo_config)
else:
output.write_line('already up to date.')
output_repos.append(repo_config)
if changed:
output_config = input_config.copy()
output_config['repos'] = output_repos
_write_new_config_file(config_file, output_config)
return retv | Auto-update the pre-commit config to the latest versions of repos. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/commands/autoupdate.py#L117-L160 | [
"def write(s, stream=stdout_byte_stream):\n stream.write(five.to_bytes(s))\n stream.flush()\n",
"def migrate_config(config_file, quiet=False):\n with io.open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(conten... | from __future__ import print_function
from __future__ import unicode_literals
import os.path
import re
import six
from aspy.yaml import ordered_dump
from aspy.yaml import ordered_load
from cfgv import remove_defaults
import pre_commit.constants as C
from pre_commit import output
from pre_commit.clientlib import CONFIG_SCHEMA
from pre_commit.clientlib import InvalidManifestError
from pre_commit.clientlib import load_config
from pre_commit.clientlib import load_manifest
from pre_commit.clientlib import LOCAL
from pre_commit.clientlib import META
from pre_commit.commands.migrate_config import migrate_config
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
class RepositoryCannotBeUpdatedError(RuntimeError):
pass
def _update_repo(repo_config, store, tags_only):
    """Updates a repository to the tip of `master`. If the repository cannot
    be updated because a hook that is configured does not exist in `master`,
    this raises a RepositoryCannotBeUpdatedError

    Args:
        repo_config - A config for a repository
    """
    repo_path = store.clone(repo_config['repo'], repo_config['rev'])
    cmd_output('git', 'fetch', cwd=repo_path)
    tag_cmd = ('git', 'describe', 'origin/master', '--tags')
    if tags_only:
        # Nearest reachable tag; otherwise require a tag exactly at HEAD.
        tag_cmd += ('--abbrev=0',)
    else:
        tag_cmd += ('--exact',)
    try:
        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
    except CalledProcessError:
        # No suitable tag -- fall back to the commit sha of origin/master.
        tag_cmd = ('git', 'rev-parse', 'origin/master')
        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
    # Don't bother trying to update if our rev is the same
    if rev == repo_config['rev']:
        return repo_config
    try:
        path = store.clone(repo_config['repo'], rev)
        manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
    except InvalidManifestError as e:
        raise RepositoryCannotBeUpdatedError(six.text_type(e))
    # See if any of our hooks were deleted with the new commits
    hooks = {hook['id'] for hook in repo_config['hooks']}
    hooks_missing = hooks - {hook['id'] for hook in manifest}
    if hooks_missing:
        raise RepositoryCannotBeUpdatedError(
            'Cannot update because the tip of master is missing these hooks:\n'
            '{}'.format(', '.join(sorted(hooks_missing))),
        )
    # Construct a new config with the head rev
    new_config = repo_config.copy()
    new_config['rev'] = rev
    return new_config
REV_LINE_RE = re.compile(r'^(\s+)rev:(\s*)([^\s#]+)(.*)$', re.DOTALL)
REV_LINE_FMT = '{}rev:{}{}{}'
def _write_new_config_file(path, output):
    """Write the updated config to `path`, preserving original formatting.

    Splices only the new `rev:` values into the original file's lines;
    if the spliced result no longer round-trips to the intended config,
    falls back to a full pretty-printed yaml dump.
    """
    with open(path) as f:
        original_contents = f.read()
    output = remove_defaults(output, CONFIG_SCHEMA)
    new_contents = ordered_dump(output, **C.YAML_DUMP_KWARGS)
    lines = original_contents.splitlines(True)
    # Indices of the original `rev:` lines, reversed so pop() yields them
    # in document order.
    rev_line_indices_reversed = list(
        reversed([
            i for i, line in enumerate(lines) if REV_LINE_RE.match(line)
        ]),
    )
    for line in new_contents.splitlines(True):
        if REV_LINE_RE.match(line):
            # It's possible we didn't identify the rev lines in the original
            if not rev_line_indices_reversed:
                break
            line_index = rev_line_indices_reversed.pop()
            original_line = lines[line_index]
            orig_match = REV_LINE_RE.match(original_line)
            new_match = REV_LINE_RE.match(line)
            # Keep the original whitespace/comment; swap in the new rev.
            lines[line_index] = REV_LINE_FMT.format(
                orig_match.group(1), orig_match.group(2),
                new_match.group(3), orig_match.group(4),
            )
    # If we failed to intelligently rewrite the rev lines, fall back to the
    # pretty-formatted yaml output
    to_write = ''.join(lines)
    if remove_defaults(ordered_load(to_write), CONFIG_SCHEMA) != output:
        to_write = new_contents
    with open(path, 'w') as f:
        f.write(to_write)
|
pre-commit/pre-commit | pre_commit/languages/python_venv.py | orig_py_exe | python | def orig_py_exe(exe): # pragma: no cover (platform specific)
try:
prefix_script = 'import sys; print(sys.real_prefix)'
_, prefix, _ = cmd_output(exe, '-c', prefix_script)
prefix = prefix.strip()
except CalledProcessError:
# not created from -mvirtualenv
return exe
if os.name == 'nt':
expected = os.path.join(prefix, 'python.exe')
else:
expected = os.path.join(prefix, 'bin', os.path.basename(exe))
if os.path.exists(expected):
return expected
else:
return exe | A -mvenv virtualenv made from a -mvirtualenv virtualenv installs
packages to the incorrect location. Attempt to find the _original_ exe
and invoke `-mvenv` from there.
See:
- https://github.com/pre-commit/pre-commit/issues/755
- https://github.com/pypa/virtualenv/issues/1095
- https://bugs.python.org/issue30811 | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/languages/python_venv.py#L21-L47 | [
"def cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the typ... | from __future__ import unicode_literals
import os.path
import sys
from pre_commit.languages import python
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
ENVIRONMENT_DIR = 'py_venv'
def get_default_version(): # pragma: no cover (version specific)
    """Default python for venv hooks: `python3` on a py2 host, otherwise
    defer to the python language plugin's default."""
    if sys.version_info >= (3,):
        return python.get_default_version()
    return 'python3'
def make_venv(envdir, python):
    """Create a `python -mvenv` environment at `envdir`.

    `python` is resolved through orig_py_exe first to work around venvs
    created from a -mvirtualenv python.  cwd='/' -- presumably to avoid
    influence from the current directory; confirm.
    """
    cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')
_interface = python.py_interface(ENVIRONMENT_DIR, make_venv)
in_env, healthy, run_hook, install_environment = _interface
|
pre-commit/pre-commit | pre_commit/color_windows.py | enable_virtual_terminal_processing | python | def enable_virtual_terminal_processing():
stdout = GetStdHandle(STD_OUTPUT_HANDLE)
flags = GetConsoleMode(stdout)
SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING) | As of Windows 10, the Windows console supports (some) ANSI escape
sequences, but it needs to be enabled using `SetConsoleMode` first.
More info on the escape sequences supported:
https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/color_windows.py#L39-L48 | null | from __future__ import absolute_import
from __future__ import unicode_literals
from ctypes import POINTER
from ctypes import windll
from ctypes import WinError
from ctypes import WINFUNCTYPE
from ctypes.wintypes import BOOL
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
STD_OUTPUT_HANDLE = -11
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
def bool_errcheck(result, func, args):
    """ctypes errcheck helper: raise WinError() when a BOOL result is falsy."""
    if result:
        return args
    raise WinError()
GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(
('GetStdHandle', windll.kernel32), ((1, 'nStdHandle'),),
)
GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(
('GetConsoleMode', windll.kernel32),
((1, 'hConsoleHandle'), (2, 'lpMode')),
)
GetConsoleMode.errcheck = bool_errcheck
SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)(
('SetConsoleMode', windll.kernel32),
((1, 'hConsoleHandle'), (1, 'dwMode')),
)
SetConsoleMode.errcheck = bool_errcheck
|
pre-commit/pre-commit | pre_commit/color.py | format_color | python | def format_color(text, color, use_color_setting):
if not use_color_setting:
return text
else:
return '{}{}{}'.format(color, text, NORMAL) | Format text with color.
Args:
text - Text to be formatted with color if `use_color`
color - The color start string
use_color_setting - Whether or not to color | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/color.py#L25-L36 | null | from __future__ import unicode_literals
import os
import sys
terminal_supports_color = True
if os.name == 'nt': # pragma: no cover (windows)
from pre_commit.color_windows import enable_virtual_terminal_processing
try:
enable_virtual_terminal_processing()
except WindowsError:
terminal_supports_color = False
RED = '\033[41m'
GREEN = '\033[42m'
YELLOW = '\033[43;30m'
TURQUOISE = '\033[46;30m'
NORMAL = '\033[0m'
class InvalidColorSetting(ValueError):
pass
COLOR_CHOICES = ('auto', 'always', 'never')
def use_color(setting):
    """Choose whether to use color based on the command argument.

    Args:
        setting - Either `auto`, `always`, or `never`
    """
    if setting not in COLOR_CHOICES:
        raise InvalidColorSetting(setting)
    if setting == 'always':
        return True
    if setting == 'never':
        return False
    # 'auto': color only when stdout is an interactive, capable terminal.
    return sys.stdout.isatty() and terminal_supports_color
|
pre-commit/pre-commit | pre_commit/color.py | use_color | python | def use_color(setting):
if setting not in COLOR_CHOICES:
raise InvalidColorSetting(setting)
return (
setting == 'always' or
(setting == 'auto' and sys.stdout.isatty() and terminal_supports_color)
) | Choose whether to use color based on the command argument.
Args:
setting - Either `auto`, `always`, or `never` | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/color.py#L42-L54 | null | from __future__ import unicode_literals
import os
import sys
terminal_supports_color = True
if os.name == 'nt': # pragma: no cover (windows)
from pre_commit.color_windows import enable_virtual_terminal_processing
try:
enable_virtual_terminal_processing()
except WindowsError:
terminal_supports_color = False
RED = '\033[41m'
GREEN = '\033[42m'
YELLOW = '\033[43;30m'
TURQUOISE = '\033[46;30m'
NORMAL = '\033[0m'
class InvalidColorSetting(ValueError):
pass
def format_color(text, color, use_color_setting):
    """Format text with color.

    Args:
        text - Text to be formatted with color if `use_color`
        color - The color start string
        use_color_setting - Whether or not to color
    """
    if use_color_setting:
        return '{}{}{}'.format(color, text, NORMAL)
    return text
COLOR_CHOICES = ('auto', 'always', 'never')
|
pre-commit/pre-commit | pre_commit/store.py | _get_default_directory | python | def _get_default_directory():
return os.environ.get('PRE_COMMIT_HOME') or os.path.join(
os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
'pre-commit',
) | Returns the default directory for the Store. This is intentionally
underscored to indicate that `Store.get_default_directory` is the intended
way to get this information. This is also done so
`Store.get_default_directory` can be mocked in tests and
`_get_default_directory` can be tested. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/store.py#L23-L33 | null | from __future__ import unicode_literals
import contextlib
import io
import logging
import os.path
import sqlite3
import tempfile
import pre_commit.constants as C
from pre_commit import file_lock
from pre_commit import git
from pre_commit.util import CalledProcessError
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output
from pre_commit.util import resource_text
from pre_commit.util import rmtree
logger = logging.getLogger('pre_commit')
class Store(object):
get_default_directory = staticmethod(_get_default_directory)
def __init__(self, directory=None):
self.directory = directory or Store.get_default_directory()
self.db_path = os.path.join(self.directory, 'db.db')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with io.open(os.path.join(self.directory, 'README'), 'w') as f:
f.write(
'This directory is maintained by the pre-commit project.\n'
'Learn more: https://github.com/pre-commit/pre-commit\n',
)
if os.path.exists(self.db_path):
return
with self.exclusive_lock():
# Another process may have already completed this work
if os.path.exists(self.db_path): # pragma: no cover (race)
return
# To avoid a race where someone ^Cs between db creation and
# execution of the CREATE TABLE statement
fd, tmpfile = tempfile.mkstemp(dir=self.directory)
# We'll be managing this file ourselves
os.close(fd)
with self.connect(db_path=tmpfile) as db:
db.executescript(
'CREATE TABLE repos ('
' repo TEXT NOT NULL,'
' ref TEXT NOT NULL,'
' path TEXT NOT NULL,'
' PRIMARY KEY (repo, ref)'
');',
)
self._create_config_table_if_not_exists(db)
# Atomic file move
os.rename(tmpfile, self.db_path)
@contextlib.contextmanager
def exclusive_lock(self):
def blocked_cb(): # pragma: no cover (tests are single-process)
logger.info('Locking pre-commit directory')
with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):
yield
@contextlib.contextmanager
def connect(self, db_path=None):
db_path = db_path or self.db_path
# sqlite doesn't close its fd with its contextmanager >.<
# contextlib.closing fixes this.
# See: https://stackoverflow.com/a/28032829/812183
with contextlib.closing(sqlite3.connect(db_path)) as db:
# this creates a transaction
with db:
yield db
@classmethod
def db_repo_name(cls, repo, deps):
    """Return the database key for *repo*: the url alone, or the url plus
    the sorted, comma-joined additional dependencies."""
    if not deps:
        return repo
    return '{}:{}'.format(repo, ','.join(sorted(deps)))
def _new_repo(self, repo, ref, deps, make_strategy):
repo = self.db_repo_name(repo, deps)
def _get_result():
# Check if we already exist
with self.connect() as db:
result = db.execute(
'SELECT path FROM repos WHERE repo = ? AND ref = ?',
(repo, ref),
).fetchone()
if result:
return result[0]
result = _get_result()
if result:
return result
with self.exclusive_lock():
# Another process may have already completed this work
result = _get_result()
if result: # pragma: no cover (race)
return result
logger.info('Initializing environment for {}.'.format(repo))
directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)
with clean_path_on_failure(directory):
make_strategy(directory)
# Update our db with the created repo
with self.connect() as db:
db.execute(
'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
[repo, ref, directory],
)
return directory
def _complete_clone(self, ref, git_cmd):
    """Perform a complete clone of a repository and its submodules """
    # Full fetch (including tags) so any ref — branch, tag or sha — resolves.
    git_cmd('fetch', 'origin', '--tags')
    git_cmd('checkout', ref)
    git_cmd('submodule', 'update', '--init', '--recursive')
def _shallow_clone(self, ref, git_cmd):  # pragma: windows no cover
    """Perform a shallow clone of a repository and its submodules """
    # Wire protocol v2 allows fetching an arbitrary ref by name with a
    # depth-limited fetch; older servers may reject this (caller falls back).
    git_config = 'protocol.version=2'
    git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
    git_cmd('checkout', ref)
    git_cmd(
        '-c', git_config, 'submodule', 'update', '--init',
        '--recursive', '--depth=1',
    )
def clone(self, repo, ref, deps=()):
    """Clone the given url and checkout the specific ref."""
    # Local paths are absolutized so the remote url stays valid regardless
    # of the working directory the clone runs in.
    if os.path.isdir(repo):
        repo = os.path.abspath(repo)

    def clone_strategy(directory):
        # Scrub git-related environment so the clone is not influenced by
        # the caller's repository state.
        env = git.no_git_env()

        def _git_cmd(*args):
            cmd_output('git', *args, cwd=directory, env=env)

        _git_cmd('init', '.')
        _git_cmd('remote', 'add', 'origin', repo)
        try:
            # Prefer the cheap shallow clone; fall back to a complete clone
            # for servers that cannot serve an arbitrary ref shallowly.
            self._shallow_clone(ref, _git_cmd)
        except CalledProcessError:
            self._complete_clone(ref, _git_cmd)

    return self._new_repo(repo, ref, deps, clone_strategy)
LOCAL_RESOURCES = (
'Cargo.toml', 'main.go', 'main.rs', '.npmignore', 'package.json',
'pre_commit_dummy_package.gemspec', 'setup.py',
)
def make_local(self, deps):
def make_local_strategy(directory):
for resource in self.LOCAL_RESOURCES:
contents = resource_text('empty_template_{}'.format(resource))
with io.open(os.path.join(directory, resource), 'w') as f:
f.write(contents)
env = git.no_git_env()
# initialize the git repository so it looks more like cloned repos
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('config', 'remote.origin.url', '<<unknown>>')
_git_cmd('add', '.')
git.commit(repo=directory)
return self._new_repo(
'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,
)
def _create_config_table_if_not_exists(self, db):
db.executescript(
'CREATE TABLE IF NOT EXISTS configs ('
' path TEXT NOT NULL,'
' PRIMARY KEY (path)'
');',
)
def mark_config_used(self, path):
path = os.path.realpath(path)
# don't insert config files that do not exist
if not os.path.exists(path):
return
with self.connect() as db:
# TODO: eventually remove this and only create in _create
self._create_config_table_if_not_exists(db)
db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))
def select_all_configs(self):
with self.connect() as db:
self._create_config_table_if_not_exists(db)
rows = db.execute('SELECT path FROM configs').fetchall()
return [path for path, in rows]
def delete_configs(self, configs):
with self.connect() as db:
rows = [(path,) for path in configs]
db.executemany('DELETE FROM configs WHERE path = ?', rows)
def select_all_repos(self):
with self.connect() as db:
return db.execute('SELECT repo, ref, path from repos').fetchall()
def delete_repo(self, db_repo_name, ref, path):
with self.connect() as db:
db.execute(
'DELETE FROM repos WHERE repo = ? and ref = ?',
(db_repo_name, ref),
)
rmtree(path)
|
pre-commit/pre-commit | pre_commit/store.py | Store._shallow_clone | python | def _shallow_clone(self, ref, git_cmd): # pragma: windows no cover
git_config = 'protocol.version=2'
git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
git_cmd('checkout', ref)
git_cmd(
'-c', git_config, 'submodule', 'update', '--init',
'--recursive', '--depth=1',
) | Perform a shallow clone of a repository and its submodules | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/store.py#L145-L154 | null | class Store(object):
get_default_directory = staticmethod(_get_default_directory)
def __init__(self, directory=None):
self.directory = directory or Store.get_default_directory()
self.db_path = os.path.join(self.directory, 'db.db')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with io.open(os.path.join(self.directory, 'README'), 'w') as f:
f.write(
'This directory is maintained by the pre-commit project.\n'
'Learn more: https://github.com/pre-commit/pre-commit\n',
)
if os.path.exists(self.db_path):
return
with self.exclusive_lock():
# Another process may have already completed this work
if os.path.exists(self.db_path): # pragma: no cover (race)
return
# To avoid a race where someone ^Cs between db creation and
# execution of the CREATE TABLE statement
fd, tmpfile = tempfile.mkstemp(dir=self.directory)
# We'll be managing this file ourselves
os.close(fd)
with self.connect(db_path=tmpfile) as db:
db.executescript(
'CREATE TABLE repos ('
' repo TEXT NOT NULL,'
' ref TEXT NOT NULL,'
' path TEXT NOT NULL,'
' PRIMARY KEY (repo, ref)'
');',
)
self._create_config_table_if_not_exists(db)
# Atomic file move
os.rename(tmpfile, self.db_path)
@contextlib.contextmanager
def exclusive_lock(self):
def blocked_cb(): # pragma: no cover (tests are single-process)
logger.info('Locking pre-commit directory')
with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):
yield
@contextlib.contextmanager
def connect(self, db_path=None):
db_path = db_path or self.db_path
# sqlite doesn't close its fd with its contextmanager >.<
# contextlib.closing fixes this.
# See: https://stackoverflow.com/a/28032829/812183
with contextlib.closing(sqlite3.connect(db_path)) as db:
# this creates a transaction
with db:
yield db
@classmethod
def db_repo_name(cls, repo, deps):
if deps:
return '{}:{}'.format(repo, ','.join(sorted(deps)))
else:
return repo
def _new_repo(self, repo, ref, deps, make_strategy):
repo = self.db_repo_name(repo, deps)
def _get_result():
# Check if we already exist
with self.connect() as db:
result = db.execute(
'SELECT path FROM repos WHERE repo = ? AND ref = ?',
(repo, ref),
).fetchone()
if result:
return result[0]
result = _get_result()
if result:
return result
with self.exclusive_lock():
# Another process may have already completed this work
result = _get_result()
if result: # pragma: no cover (race)
return result
logger.info('Initializing environment for {}.'.format(repo))
directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)
with clean_path_on_failure(directory):
make_strategy(directory)
# Update our db with the created repo
with self.connect() as db:
db.execute(
'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
[repo, ref, directory],
)
return directory
def _complete_clone(self, ref, git_cmd):
"""Perform a complete clone of a repository and its submodules """
git_cmd('fetch', 'origin', '--tags')
git_cmd('checkout', ref)
git_cmd('submodule', 'update', '--init', '--recursive')
def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
if os.path.isdir(repo):
repo = os.path.abspath(repo)
def clone_strategy(directory):
env = git.no_git_env()
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('remote', 'add', 'origin', repo)
try:
self._shallow_clone(ref, _git_cmd)
except CalledProcessError:
self._complete_clone(ref, _git_cmd)
return self._new_repo(repo, ref, deps, clone_strategy)
LOCAL_RESOURCES = (
'Cargo.toml', 'main.go', 'main.rs', '.npmignore', 'package.json',
'pre_commit_dummy_package.gemspec', 'setup.py',
)
def make_local(self, deps):
def make_local_strategy(directory):
for resource in self.LOCAL_RESOURCES:
contents = resource_text('empty_template_{}'.format(resource))
with io.open(os.path.join(directory, resource), 'w') as f:
f.write(contents)
env = git.no_git_env()
# initialize the git repository so it looks more like cloned repos
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('config', 'remote.origin.url', '<<unknown>>')
_git_cmd('add', '.')
git.commit(repo=directory)
return self._new_repo(
'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,
)
def _create_config_table_if_not_exists(self, db):
db.executescript(
'CREATE TABLE IF NOT EXISTS configs ('
' path TEXT NOT NULL,'
' PRIMARY KEY (path)'
');',
)
def mark_config_used(self, path):
path = os.path.realpath(path)
# don't insert config files that do not exist
if not os.path.exists(path):
return
with self.connect() as db:
# TODO: eventually remove this and only create in _create
self._create_config_table_if_not_exists(db)
db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))
def select_all_configs(self):
with self.connect() as db:
self._create_config_table_if_not_exists(db)
rows = db.execute('SELECT path FROM configs').fetchall()
return [path for path, in rows]
def delete_configs(self, configs):
with self.connect() as db:
rows = [(path,) for path in configs]
db.executemany('DELETE FROM configs WHERE path = ?', rows)
def select_all_repos(self):
with self.connect() as db:
return db.execute('SELECT repo, ref, path from repos').fetchall()
def delete_repo(self, db_repo_name, ref, path):
with self.connect() as db:
db.execute(
'DELETE FROM repos WHERE repo = ? and ref = ?',
(db_repo_name, ref),
)
rmtree(path)
|
pre-commit/pre-commit | pre_commit/store.py | Store.clone | python | def clone(self, repo, ref, deps=()):
if os.path.isdir(repo):
repo = os.path.abspath(repo)
def clone_strategy(directory):
env = git.no_git_env()
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('remote', 'add', 'origin', repo)
try:
self._shallow_clone(ref, _git_cmd)
except CalledProcessError:
self._complete_clone(ref, _git_cmd)
return self._new_repo(repo, ref, deps, clone_strategy) | Clone the given url and checkout the specific ref. | train | https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/store.py#L156-L176 | [
"def _new_repo(self, repo, ref, deps, make_strategy):\n repo = self.db_repo_name(repo, deps)\n\n def _get_result():\n # Check if we already exist\n with self.connect() as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? AND ref = ?',\n ... | class Store(object):
get_default_directory = staticmethod(_get_default_directory)
def __init__(self, directory=None):
self.directory = directory or Store.get_default_directory()
self.db_path = os.path.join(self.directory, 'db.db')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with io.open(os.path.join(self.directory, 'README'), 'w') as f:
f.write(
'This directory is maintained by the pre-commit project.\n'
'Learn more: https://github.com/pre-commit/pre-commit\n',
)
if os.path.exists(self.db_path):
return
with self.exclusive_lock():
# Another process may have already completed this work
if os.path.exists(self.db_path): # pragma: no cover (race)
return
# To avoid a race where someone ^Cs between db creation and
# execution of the CREATE TABLE statement
fd, tmpfile = tempfile.mkstemp(dir=self.directory)
# We'll be managing this file ourselves
os.close(fd)
with self.connect(db_path=tmpfile) as db:
db.executescript(
'CREATE TABLE repos ('
' repo TEXT NOT NULL,'
' ref TEXT NOT NULL,'
' path TEXT NOT NULL,'
' PRIMARY KEY (repo, ref)'
');',
)
self._create_config_table_if_not_exists(db)
# Atomic file move
os.rename(tmpfile, self.db_path)
@contextlib.contextmanager
def exclusive_lock(self):
def blocked_cb(): # pragma: no cover (tests are single-process)
logger.info('Locking pre-commit directory')
with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):
yield
@contextlib.contextmanager
def connect(self, db_path=None):
db_path = db_path or self.db_path
# sqlite doesn't close its fd with its contextmanager >.<
# contextlib.closing fixes this.
# See: https://stackoverflow.com/a/28032829/812183
with contextlib.closing(sqlite3.connect(db_path)) as db:
# this creates a transaction
with db:
yield db
@classmethod
def db_repo_name(cls, repo, deps):
if deps:
return '{}:{}'.format(repo, ','.join(sorted(deps)))
else:
return repo
def _new_repo(self, repo, ref, deps, make_strategy):
repo = self.db_repo_name(repo, deps)
def _get_result():
# Check if we already exist
with self.connect() as db:
result = db.execute(
'SELECT path FROM repos WHERE repo = ? AND ref = ?',
(repo, ref),
).fetchone()
if result:
return result[0]
result = _get_result()
if result:
return result
with self.exclusive_lock():
# Another process may have already completed this work
result = _get_result()
if result: # pragma: no cover (race)
return result
logger.info('Initializing environment for {}.'.format(repo))
directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)
with clean_path_on_failure(directory):
make_strategy(directory)
# Update our db with the created repo
with self.connect() as db:
db.execute(
'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
[repo, ref, directory],
)
return directory
def _complete_clone(self, ref, git_cmd):
"""Perform a complete clone of a repository and its submodules """
git_cmd('fetch', 'origin', '--tags')
git_cmd('checkout', ref)
git_cmd('submodule', 'update', '--init', '--recursive')
def _shallow_clone(self, ref, git_cmd): # pragma: windows no cover
"""Perform a shallow clone of a repository and its submodules """
git_config = 'protocol.version=2'
git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
git_cmd('checkout', ref)
git_cmd(
'-c', git_config, 'submodule', 'update', '--init',
'--recursive', '--depth=1',
)
LOCAL_RESOURCES = (
'Cargo.toml', 'main.go', 'main.rs', '.npmignore', 'package.json',
'pre_commit_dummy_package.gemspec', 'setup.py',
)
def make_local(self, deps):
def make_local_strategy(directory):
for resource in self.LOCAL_RESOURCES:
contents = resource_text('empty_template_{}'.format(resource))
with io.open(os.path.join(directory, resource), 'w') as f:
f.write(contents)
env = git.no_git_env()
# initialize the git repository so it looks more like cloned repos
def _git_cmd(*args):
cmd_output('git', *args, cwd=directory, env=env)
_git_cmd('init', '.')
_git_cmd('config', 'remote.origin.url', '<<unknown>>')
_git_cmd('add', '.')
git.commit(repo=directory)
return self._new_repo(
'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,
)
def _create_config_table_if_not_exists(self, db):
db.executescript(
'CREATE TABLE IF NOT EXISTS configs ('
' path TEXT NOT NULL,'
' PRIMARY KEY (path)'
');',
)
def mark_config_used(self, path):
path = os.path.realpath(path)
# don't insert config files that do not exist
if not os.path.exists(path):
return
with self.connect() as db:
# TODO: eventually remove this and only create in _create
self._create_config_table_if_not_exists(db)
db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))
def select_all_configs(self):
with self.connect() as db:
self._create_config_table_if_not_exists(db)
rows = db.execute('SELECT path FROM configs').fetchall()
return [path for path, in rows]
def delete_configs(self, configs):
with self.connect() as db:
rows = [(path,) for path in configs]
db.executemany('DELETE FROM configs WHERE path = ?', rows)
def select_all_repos(self):
with self.connect() as db:
return db.execute('SELECT repo, ref, path from repos').fetchall()
def delete_repo(self, db_repo_name, ref, path):
with self.connect() as db:
db.execute(
'DELETE FROM repos WHERE repo = ? and ref = ?',
(db_repo_name, ref),
)
rmtree(path)
|
def generate_duid(mac):
    """DUID is consisted of 10 hex numbers.

    0x00 + mac with last 3 hex + mac with 6 hex

    :param mac: colon-separated MAC string, e.g. 'aa:bb:cc:dd:ee:ff'
    :returns: DUID string, e.g. '00:dd:ee:ff:aa:bb:cc:dd:ee:ff'
    :raises ValueError: if mac is empty/None or not a string
    """
    # isinstance(mac, str) replaces the previous six.string_types check;
    # six was only needed for Python 2 unicode support.
    valid = mac and isinstance(mac, str)
    if not valid:
        raise ValueError("Invalid argument was passed")
    # mac[9:] is the last three octets of a standard 17-character MAC
    # ('dd:ee:ff'), followed by the full mac.
    return "00:" + mac[9:] + ":" + mac
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import six
try:
from oslo_log import log as logging
except ImportError: # pragma: no cover
import logging
try:
from oslo_serialization import jsonutils
except ImportError: # pragma: no cover
import json as jsonutils
LOG = logging.getLogger(__name__)
def is_valid_ip(ip):
    """Return True if *ip* parses as an IPv4/IPv6 address, False otherwise."""
    try:
        netaddr.IPAddress(ip)
    except netaddr.core.AddrFormatError:
        return False
    return True
def determine_ip_version(ip_in):
    """Return the IP version (4 or 6) for *ip_in*.

    Accepts several input forms:
      - a list/tuple: only the first element is inspected;
      - an int: 6 means IPv6, anything else is treated as IPv4;
      - an object exposing an ``ip_version`` attribute: that value is
        returned as-is;
      - a dict: its ``ip_address`` value is parsed;
      - otherwise: parsed with netaddr as an address, falling back to a
        network (CIDR) on ValueError.

    Falsy input defaults to 4.
    """
    ip_ver = 4
    if isinstance(ip_in, (list, tuple)):
        # Only the first element decides the version.
        ip_in = ip_in[0]
    if ip_in:
        if isinstance(ip_in, int):
            if ip_in == 6:
                ip_ver = 6
            else:
                ip_ver = 4
        elif hasattr(ip_in, 'ip_version'):
            return ip_in.ip_version
        else:
            if type(ip_in) is dict:
                addr = ip_in['ip_address']
            else:
                addr = ip_in
            try:
                ip = netaddr.IPAddress(addr)
            except ValueError:
                # Not a plain address; try parsing as a CIDR network.
                ip = netaddr.IPNetwork(addr)
            ip_ver = ip.version
    return ip_ver
def safe_json_load(data):
    """Deserialize *data* as JSON.

    :param data: a JSON document (string/bytes)
    :returns: the decoded object, or None (with a logged warning) when
        *data* is not valid JSON
    """
    try:
        return jsonutils.loads(data)
    except ValueError:
        # Logger.warn() is a deprecated alias; use warning().
        LOG.warning("Could not decode reply into json: %s", data)
def try_value_to_bool(value, strict_mode=True):
"""Tries to convert value into boolean.
strict_mode is True:
- Only string representation of str(True) and str(False)
are converted into booleans;
- Otherwise unchanged incoming value is returned;
strict_mode is False:
- Anything that looks like True or False is converted into booleans.
Values accepted as True:
- 'true', 'on', 'yes' (case independent)
Values accepted as False:
- 'false', 'off', 'no' (case independent)
- all other values are returned unchanged
"""
if strict_mode:
true_list = ('True',)
false_list = ('False',)
val = value
else:
true_list = ('true', 'on', 'yes')
false_list = ('false', 'off', 'no')
val = str(value).lower()
if val in true_list:
return True
elif val in false_list:
return False
return value
|
def try_value_to_bool(value, strict_mode=True):
    """Tries to convert value into boolean.

    strict_mode is True:
     - Only string representation of str(True) and str(False)
       are converted into booleans;
     - Otherwise unchanged incoming value is returned;

    strict_mode is False:
     - Anything that looks like True or False is converted into booleans.
       Values accepted as True:
        - 'true', 'on', 'yes' (case independent)
       Values accepted as False:
        - 'false', 'off', 'no' (case independent)
     - all other values are returned unchanged
    """
    if strict_mode:
        truthy = ('True',)
        falsy = ('False',)
        candidate = value
    else:
        truthy = ('true', 'on', 'yes')
        falsy = ('false', 'off', 'no')
        candidate = str(value).lower()

    if candidate in truthy:
        return True
    if candidate in falsy:
        return False
    return value
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import six
try:
from oslo_log import log as logging
except ImportError: # pragma: no cover
import logging
try:
from oslo_serialization import jsonutils
except ImportError: # pragma: no cover
import json as jsonutils
LOG = logging.getLogger(__name__)
def is_valid_ip(ip):
try:
netaddr.IPAddress(ip)
except netaddr.core.AddrFormatError:
return False
return True
def generate_duid(mac):
"""DUID is consisted of 10 hex numbers.
0x00 + mac with last 3 hex + mac with 6 hex
"""
valid = mac and isinstance(mac, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
return "00:" + mac[9:] + ":" + mac
def determine_ip_version(ip_in):
ip_ver = 4
if isinstance(ip_in, (list, tuple)):
ip_in = ip_in[0]
if ip_in:
if isinstance(ip_in, int):
if ip_in == 6:
ip_ver = 6
else:
ip_ver = 4
elif hasattr(ip_in, 'ip_version'):
return ip_in.ip_version
else:
if type(ip_in) is dict:
addr = ip_in['ip_address']
else:
addr = ip_in
try:
ip = netaddr.IPAddress(addr)
except ValueError:
ip = netaddr.IPNetwork(addr)
ip_ver = ip.version
return ip_ver
def safe_json_load(data):
try:
return jsonutils.loads(data)
except ValueError:
LOG.warn("Could not decode reply into json: %s", data)
|
def create_network(self, net_view_name, cidr, nameservers=None,
                   members=None, gateway_ip=None, dhcp_trel_ip=None,
                   network_extattrs=None):
    """Create NIOS Network and prepare DHCP options.

    Some DHCP options are valid for IPv4 only, so just skip processing
    them for IPv6 case.

    :param net_view_name: network view name
    :param cidr: network to allocate, example '172.23.23.0/24'
    :param nameservers: list of name servers hosts/ip
    :param members: list of objects.AnyMember objects that are expected
        to serve dhcp for created network
    :param gateway_ip: gateway ip for the network (valid for IPv4 only)
    :param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
    :param network_extattrs: extensible attributes for network (instance of
        objects.EA)
    :returns: created network (instance of objects.Network)
    """
    ipv4 = ib_utils.determine_ip_version(cidr) == 4
    options = []
    if nameservers:
        options.append(obj.DhcpOption(name='domain-name-servers',
                                      value=",".join(nameservers)))
    if ipv4 and gateway_ip:
        options.append(obj.DhcpOption(name='routers',
                                      value=gateway_ip))
    if ipv4 and dhcp_trel_ip:
        # DHCP option 54 identifies the server; here it is pointed at the
        # relay address.
        options.append(obj.DhcpOption(name='dhcp-server-identifier',
                                      num=54,
                                      value=dhcp_trel_ip))
    return obj.Network.create(self.connector,
                              network_view=net_view_name,
                              cidr=cidr,
                              members=members,
                              options=options,
                              extattrs=network_extattrs,
                              check_if_exists=False)
"def determine_ip_version(ip_in):\n ip_ver = 4\n if isinstance(ip_in, (list, tuple)):\n ip_in = ip_in[0]\n if ip_in:\n if isinstance(ip_in, int):\n if ip_in == 6:\n ip_ver = 6\n else:\n ip_ver = 4\n elif hasattr(ip_in, 'ip_version'):\... | class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def network_exists(self, network_view, cidr):
    """Deprecated, use get_network() instead."""
    # Kept for backward compatibility; logs a deprecation warning on every
    # call and delegates to the same search get_network() performs.
    LOG.warning(
        "DEPRECATION WARNING! Using network_exists() is deprecated "
        "and to be removed in next releases. "
        "Use get_network() or objects.Network.search instead")
    network = obj.Network.search(self.connector,
                                 network_view=network_view,
                                 cidr=cidr)
    return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
# Unfortunately WAPI does not support search host records by DUID, so
# search host addresses by duid and then search hosts by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
    def get_member(self, member):
        """Populate *member* via its fetch() method and return it."""
        member.fetch()
        return member
    def get_all_ea_definitions(self):
        """Return every Extensible Attribute definition on the grid."""
        return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_objects_associated_with_a_record(self, name, view, delete_list):
"""Deletes records associated with record:a or record:aaaa."""
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref'])
    def delete_all_associated_objects(self, network_view, ip, delete_list):
        """Deprecated no-op kept for backward compatibility.

        Only logs a deprecation warning; callers should migrate to
        unbind_name_from_record_a().
        """
        LOG.warning(
            "DEPRECATION WARNING! Using delete_all_associated_objects() "
            "is deprecated and to be removed in next releases. "
            "Use unbind_name_from_record_a() instead.")
    def delete_object_by_ref(self, ref):
        """Delete the NIOS object identified by WAPI reference *ref*.

        Deletion failures are deliberately ignored (best-effort cleanup).
        """
        try:
            self.connector.delete_object(ref)
        except ib_ex.InfobloxCannotDeleteObject:
            pass
|
infobloxopen/infoblox-client | infoblox_client/object_manager.py | InfobloxObjectManager.create_ip_range | python | def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False) | Creates IPRange or fails if already exists. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L103-L113 | [
"def create(cls, connector, check_if_exists=True,\n update_if_exists=False, **kwargs):\n ib_object, _ = (\n cls.create_check_exists(connector,\n check_if_exists=check_if_exists,\n update_if_exists=update_if_exists,\n ... | class InfobloxObjectManager(object):
    def __init__(self, connector):
        # The connector performs all WAPI calls on behalf of this manager.
        self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def network_exists(self, network_view, cidr):
"""Deprecated, use get_network() instead."""
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
# Unfortunately WAPI does not support search host records by DUID, so
# search host addresses by duid and then search hosts by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_objects_associated_with_a_record(self, name, view, delete_list):
"""Deletes records associated with record:a or record:aaaa."""
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref'])
def delete_all_associated_objects(self, network_view, ip, delete_list):
LOG.warning(
"DEPRECATION WARNING! Using delete_all_associated_objects() "
"is deprecated and to be removed in next releases. "
"Use unbind_name_from_record_a() instead.")
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
|
infobloxopen/infoblox-client | infoblox_client/object_manager.py | InfobloxObjectManager.network_exists | python | def network_exists(self, network_view, cidr):
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None | Deprecated, use get_network() instead. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L128-L137 | [
"def search(cls, connector, **kwargs):\n ib_obj, parse_class = cls._search(\n connector, **kwargs)\n if ib_obj:\n return parse_class.from_dict(connector, ib_obj[0])\n"
] | class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
# Unfortunately WAPI does not support search host records by DUID, so
# search host addresses by duid and then search hosts by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_objects_associated_with_a_record(self, name, view, delete_list):
"""Deletes records associated with record:a or record:aaaa."""
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref'])
def delete_all_associated_objects(self, network_view, ip, delete_list):
LOG.warning(
"DEPRECATION WARNING! Using delete_all_associated_objects() "
"is deprecated and to be removed in next releases. "
"Use unbind_name_from_record_a() instead.")
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
|
infobloxopen/infoblox-client | infoblox_client/object_manager.py | InfobloxObjectManager.delete_objects_associated_with_a_record | python | def delete_objects_associated_with_a_record(self, name, view, delete_list):
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref']) | Deletes records associated with record:a or record:aaaa. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L453-L470 | [
"def delete_object_by_ref(self, ref):\n try:\n self.connector.delete_object(ref)\n except ib_ex.InfobloxCannotDeleteObject:\n pass\n"
] | class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.DhcpOption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.DhcpOption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.DhcpOption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def network_exists(self, network_view, cidr):
"""Deprecated, use get_network() instead."""
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
# Unfortunately WAPI does not support search host records by DUID, so
# search host addresses by duid and then search hosts by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_all_associated_objects(self, network_view, ip, delete_list):
LOG.warning(
"DEPRECATION WARNING! Using delete_all_associated_objects() "
"is deprecated and to be removed in next releases. "
"Use unbind_name_from_record_a() instead.")
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
|
infobloxopen/infoblox-client | infoblox_client/connector.py | Connector._parse_options | python | def _parse_options(self, options):
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version) | Copy needed options to self | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L89-L115 | [
"def is_cloud_wapi(wapi_version):\n valid = wapi_version and isinstance(wapi_version, six.string_types)\n if not valid:\n raise ValueError(\"Invalid argument was passed\")\n version_match = re.search(r'(\\d+)\\.(\\d+)', wapi_version)\n if version_match:\n if int(version_match.group(1)) >= ... | class Connector(object):
"""Connector stands for interacting with Infoblox NIOS
Defines methods for getting, creating, updating and
removing objects from an Infoblox server instance.
"""
DEFAULT_HEADER = {'Content-type': 'application/json'}
DEFAULT_OPTIONS = {'ssl_verify': False,
'silent_ssl_warnings': False,
'http_request_timeout': 10,
'http_pool_connections': 10,
'http_pool_maxsize': 10,
'max_retries': 3,
'wapi_version': '2.1',
'max_results': None,
'log_api_calls_as_info': False,
'paging': False}
def __init__(self, options):
self._parse_options(options)
self._configure_session()
# urllib has different interface for py27 and py34
try:
self._urlencode = urllib.urlencode
self._quote = urllib.quote
self._urljoin = urlparse.urljoin
except AttributeError:
self._urlencode = urlparse.urlencode
self._quote = urlparse.quote
self._urljoin = urlparse.urljoin
def _configure_session(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=self.http_pool_connections,
pool_maxsize=self.http_pool_maxsize,
max_retries=self.max_retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.auth = (self.username, self.password)
self.session.verify = utils.try_value_to_bool(self.ssl_verify,
strict_mode=False)
if self.silent_ssl_warnings:
urllib3.disable_warnings()
def _construct_url(self, relative_path, query_params=None,
extattrs=None, force_proxy=False):
if query_params is None:
query_params = {}
if extattrs is None:
extattrs = {}
if force_proxy:
query_params['_proxy_search'] = 'GM'
if not relative_path or relative_path[0] == '/':
raise ValueError('Path in request must be relative.')
query = ''
if query_params or extattrs:
query = '?'
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
param = "*%s" % key
value = value['value']
if isinstance(value, list):
for item in value:
attrs_queries.append(self._urlencode({param: item}))
else:
attrs_queries.append(self._urlencode({param: value}))
query += '&'.join(attrs_queries)
if query_params:
if len(query) > 1:
query += '&'
query += self._urlencode(query_params)
base_url = self._urljoin(self.wapi_url,
self._quote(relative_path))
return base_url + query
@staticmethod
def _validate_obj_type_or_die(obj_type, obj_type_expected=True):
if not obj_type:
raise ValueError('NIOS object type cannot be empty.')
if obj_type_expected and '/' in obj_type:
raise ValueError('NIOS object type cannot contain slash.')
@staticmethod
def _validate_authorized(response):
if response.status_code == requests.codes.UNAUTHORIZED:
raise ib_ex.InfobloxBadWAPICredential(response='')
@staticmethod
def _build_query_params(payload=None, return_fields=None,
max_results=None, paging=False):
if payload:
query_params = payload
else:
query_params = dict()
if return_fields:
if 'default' in return_fields:
return_fields.remove('default')
query_params['_return_fields+'] = ','.join(return_fields)
else:
query_params['_return_fields'] = ','.join(return_fields)
if max_results:
query_params['_max_results'] = max_results
if paging:
query_params['_paging'] = 1
query_params['_return_as_object'] = 1
return query_params
def _get_request_options(self, data=None):
opts = dict(timeout=self.http_request_timeout,
headers=self.DEFAULT_HEADER,
verify=self.session.verify)
if data:
opts['data'] = jsonutils.dumps(data)
return opts
@staticmethod
def _parse_reply(request):
"""Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format
"""
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content)
def _log_request(self, type, url, opts):
message = ("Sending %s request to %s with parameters %s",
type, url, opts)
if self.log_api_calls_as_info:
LOG.info(*message)
else:
LOG.debug(*message)
@reraise_neutron_exception
def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
"""Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound
"""
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None
def _handle_get_object(self, obj_type, query_params, extattrs,
proxy_flag=False):
if '_paging' in query_params:
if not ('_max_results' in query_params):
query_params['_max_results'] = 1000
if query_params['_max_results'] < 0:
# Since pagination is enabled with _max_results < 0,
# set _max_results = 1000.
query_params['_max_results'] = 1000
result = []
while True:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
resp = self._get_object(obj_type, url)
if not resp:
return None
if not ('next_page_id' in resp):
result.extend(resp['result'])
query_params.pop('_page_id', None)
return result
else:
query_params['_page_id'] = resp['next_page_id']
result.extend(resp['result'])
else:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
return self._get_object(obj_type, url)
def _get_object(self, obj_type, url):
opts = self._get_request_options()
self._log_request('get', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.get(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
LOG.warning("Failed on object search with url %s: %s",
url, r.content)
return None
return self._parse_reply(r)
@reraise_neutron_exception
def create_object(self, obj_type, payload, return_fields=None):
"""Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException
"""
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r)
def _check_service_availability(self, operation, resp, ref):
if resp.status_code == requests.codes.SERVICE_UNAVAILABLE:
raise ib_ex.InfobloxGridTemporaryUnavailable(
response=resp.content,
operation=operation,
ref=ref,
content=resp.content,
code=resp.status_code)
@reraise_neutron_exception
def call_func(self, func_name, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
query_params['_function'] = func_name
url = self._construct_url(ref, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code not in (requests.codes.CREATED,
requests.codes.ok):
self._check_service_availability('call_func', r, ref)
raise ib_ex.InfobloxFuncException(
response=jsonutils.loads(r.content),
ref=ref,
func_name=func_name,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def update_object(self, ref, payload, return_fields=None):
"""Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException
"""
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def delete_object(self, ref, delete_arguments=None):
"""Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException
"""
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@staticmethod
def is_cloud_wapi(wapi_version):
valid = wapi_version and isinstance(wapi_version, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
version_match = re.search(r'(\d+)\.(\d+)', wapi_version)
if version_match:
if int(version_match.group(1)) >= \
CLOUD_WAPI_MAJOR_VERSION:
return True
return False
|
infobloxopen/infoblox-client | infoblox_client/connector.py | Connector._parse_reply | python | def _parse_reply(request):
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content) | Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L212-L220 | null | class Connector(object):
"""Connector stands for interacting with Infoblox NIOS
Defines methods for getting, creating, updating and
removing objects from an Infoblox server instance.
"""
DEFAULT_HEADER = {'Content-type': 'application/json'}
DEFAULT_OPTIONS = {'ssl_verify': False,
'silent_ssl_warnings': False,
'http_request_timeout': 10,
'http_pool_connections': 10,
'http_pool_maxsize': 10,
'max_retries': 3,
'wapi_version': '2.1',
'max_results': None,
'log_api_calls_as_info': False,
'paging': False}
def __init__(self, options):
self._parse_options(options)
self._configure_session()
# urllib has different interface for py27 and py34
try:
self._urlencode = urllib.urlencode
self._quote = urllib.quote
self._urljoin = urlparse.urljoin
except AttributeError:
self._urlencode = urlparse.urlencode
self._quote = urlparse.quote
self._urljoin = urlparse.urljoin
def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
def _configure_session(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=self.http_pool_connections,
pool_maxsize=self.http_pool_maxsize,
max_retries=self.max_retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.auth = (self.username, self.password)
self.session.verify = utils.try_value_to_bool(self.ssl_verify,
strict_mode=False)
if self.silent_ssl_warnings:
urllib3.disable_warnings()
def _construct_url(self, relative_path, query_params=None,
extattrs=None, force_proxy=False):
if query_params is None:
query_params = {}
if extattrs is None:
extattrs = {}
if force_proxy:
query_params['_proxy_search'] = 'GM'
if not relative_path or relative_path[0] == '/':
raise ValueError('Path in request must be relative.')
query = ''
if query_params or extattrs:
query = '?'
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
param = "*%s" % key
value = value['value']
if isinstance(value, list):
for item in value:
attrs_queries.append(self._urlencode({param: item}))
else:
attrs_queries.append(self._urlencode({param: value}))
query += '&'.join(attrs_queries)
if query_params:
if len(query) > 1:
query += '&'
query += self._urlencode(query_params)
base_url = self._urljoin(self.wapi_url,
self._quote(relative_path))
return base_url + query
@staticmethod
def _validate_obj_type_or_die(obj_type, obj_type_expected=True):
if not obj_type:
raise ValueError('NIOS object type cannot be empty.')
if obj_type_expected and '/' in obj_type:
raise ValueError('NIOS object type cannot contain slash.')
@staticmethod
def _validate_authorized(response):
if response.status_code == requests.codes.UNAUTHORIZED:
raise ib_ex.InfobloxBadWAPICredential(response='')
@staticmethod
def _build_query_params(payload=None, return_fields=None,
max_results=None, paging=False):
if payload:
query_params = payload
else:
query_params = dict()
if return_fields:
if 'default' in return_fields:
return_fields.remove('default')
query_params['_return_fields+'] = ','.join(return_fields)
else:
query_params['_return_fields'] = ','.join(return_fields)
if max_results:
query_params['_max_results'] = max_results
if paging:
query_params['_paging'] = 1
query_params['_return_as_object'] = 1
return query_params
def _get_request_options(self, data=None):
opts = dict(timeout=self.http_request_timeout,
headers=self.DEFAULT_HEADER,
verify=self.session.verify)
if data:
opts['data'] = jsonutils.dumps(data)
return opts
@staticmethod
def _log_request(self, type, url, opts):
message = ("Sending %s request to %s with parameters %s",
type, url, opts)
if self.log_api_calls_as_info:
LOG.info(*message)
else:
LOG.debug(*message)
@reraise_neutron_exception
def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
"""Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound
"""
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None
def _handle_get_object(self, obj_type, query_params, extattrs,
                       proxy_flag=False):
    """Run a WAPI GET for *obj_type*, transparently following pages.

    When '_paging' is present in query_params the WAPI returns results
    page by page; this method follows the 'next_page_id' cursor and
    concatenates every page's 'result' list. Without paging a single
    request is made and its parsed reply returned as-is.

    Args:
        obj_type (str): Infoblox object type, e.g. 'network'.
        query_params (dict): Query arguments; mutated in place to track
            '_page_id' and to normalize '_max_results' when paging.
        extattrs (dict): Extensible Attributes to filter on.
        proxy_flag (bool): Forward the request to the Grid Master.

    Returns:
        Parsed reply (for paging: a flat list of all results), or None
        when any request fails or matches nothing.
    """
    if '_paging' in query_params:
        # Paging needs a positive page size; a missing or negative
        # _max_results (the WAPI error-on-overflow mode) is replaced
        # with the default page size of 1000.
        if query_params.get('_max_results', -1) < 0:
            query_params['_max_results'] = 1000
        results = []
        while True:
            url = self._construct_url(obj_type, query_params, extattrs,
                                      force_proxy=proxy_flag)
            resp = self._get_object(obj_type, url)
            if not resp:
                return None
            results.extend(resp['result'])
            if 'next_page_id' not in resp:
                # Drop the cursor so query_params can be reused later.
                query_params.pop('_page_id', None)
                return results
            query_params['_page_id'] = resp['next_page_id']
    url = self._construct_url(obj_type, query_params, extattrs,
                              force_proxy=proxy_flag)
    return self._get_object(obj_type, url)
def _get_object(self, obj_type, url):
    """Issue a single GET against *url* and return the parsed reply.

    Returns:
        The decoded JSON reply, or None when the server answers with
        anything other than 200 OK.
    """
    request_opts = self._get_request_options()
    self._log_request('get', url, request_opts)
    # A cookie from an earlier 'get'/'post' keeps the session
    # authenticated, so basic-auth credentials are no longer needed.
    if self.session.cookies:
        self.session.auth = None
    reply = self.session.get(url, **request_opts)
    self._validate_authorized(reply)
    if reply.status_code == requests.codes.ok:
        return self._parse_reply(reply)
    LOG.warning("Failed on object search with url %s: %s",
                url, reply.content)
    return None
@reraise_neutron_exception
def create_object(self, obj_type, payload, return_fields=None):
    """Create an Infoblox object of type 'obj_type'

    Args:
        obj_type (str): Infoblox object type,
            e.g. 'network', 'range', etc.
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned
    Returns:
        The object reference of the newly created object
    Raises:
        InfobloxException
    """
    self._validate_obj_type_or_die(obj_type)
    query_params = self._build_query_params(return_fields=return_fields)
    url = self._construct_url(obj_type, query_params)
    opts = self._get_request_options(data=payload)
    self._log_request('post', url, opts)
    if self.session.cookies:
        # the first 'get' or 'post' action will generate a cookie
        # after that, we don't need to re-authenticate
        self.session.auth = None
    r = self.session.post(url, **opts)
    self._validate_authorized(r)
    if r.status_code != requests.codes.CREATED:
        response = utils.safe_json_load(r.content)
        already_assigned = 'is assigned to another network view'
        # .get('text', '') guards against error replies without a
        # 'text' key, which previously raised TypeError
        # ("argument of type 'NoneType' is not iterable").
        if response and already_assigned in response.get('text', ''):
            exception = ib_ex.InfobloxMemberAlreadyAssigned
        else:
            exception = ib_ex.InfobloxCannotCreateObject
        raise exception(
            response=response,
            obj_type=obj_type,
            content=r.content,
            args=payload,
            code=r.status_code)
    return self._parse_reply(r)
def _check_service_availability(self, operation, resp, ref):
    """Raise InfobloxGridTemporaryUnavailable on a 503 response.

    Any other status code passes through silently; callers follow up
    with their own operation-specific exception.
    """
    if resp.status_code != requests.codes.SERVICE_UNAVAILABLE:
        return
    raise ib_ex.InfobloxGridTemporaryUnavailable(
        response=resp.content,
        operation=operation,
        ref=ref,
        content=resp.content,
        code=resp.status_code)
@reraise_neutron_exception
def call_func(self, func_name, ref, payload, return_fields=None):
    """Invoke WAPI function *func_name* on the object at *ref*.

    Args:
        func_name (str): Name of the WAPI function to run.
        ref (str): Reference of the object to run it against.
        payload (dict): Arguments for the function.
        return_fields (list): Fields to include in the reply.

    Returns:
        The parsed WAPI reply.

    Raises:
        InfobloxFuncException: on any response other than 200/201.
    """
    query_params = self._build_query_params(return_fields=return_fields)
    query_params['_function'] = func_name
    url = self._construct_url(ref, query_params)
    request_opts = self._get_request_options(data=payload)
    self._log_request('post', url, request_opts)
    reply = self.session.post(url, **request_opts)
    self._validate_authorized(reply)
    success_codes = (requests.codes.CREATED, requests.codes.ok)
    if reply.status_code not in success_codes:
        self._check_service_availability('call_func', reply, ref)
        raise ib_ex.InfobloxFuncException(
            response=jsonutils.loads(reply.content),
            ref=ref,
            func_name=func_name,
            content=reply.content,
            code=reply.status_code)
    return self._parse_reply(reply)
@reraise_neutron_exception
def update_object(self, ref, payload, return_fields=None):
    """Update an Infoblox object

    Args:
        ref (str): Infoblox object reference
        payload (dict): Payload with data to send
    Returns:
        The object reference of the updated object
    Raises:
        InfobloxException
    """
    query_params = self._build_query_params(return_fields=return_fields)
    url = self._construct_url(ref, query_params)
    request_opts = self._get_request_options(data=payload)
    self._log_request('put', url, request_opts)
    reply = self.session.put(url, **request_opts)
    self._validate_authorized(reply)
    if reply.status_code == requests.codes.ok:
        return self._parse_reply(reply)
    # Non-200: surface a 503 as grid-unavailable, otherwise fail hard.
    self._check_service_availability('update', reply, ref)
    raise ib_ex.InfobloxCannotUpdateObject(
        response=jsonutils.loads(reply.content),
        ref=ref,
        content=reply.content,
        code=reply.status_code)
@reraise_neutron_exception
def delete_object(self, ref, delete_arguments=None):
    """Remove an Infoblox object

    Args:
        ref (str): Object reference
        delete_arguments (dict): Extra delete arguments
    Returns:
        The object reference of the removed object
    Raises:
        InfobloxException
    """
    request_opts = self._get_request_options()
    if not isinstance(delete_arguments, dict):
        delete_arguments = {}
    url = self._construct_url(ref, query_params=delete_arguments)
    self._log_request('delete', url, request_opts)
    reply = self.session.delete(url, **request_opts)
    self._validate_authorized(reply)
    if reply.status_code == requests.codes.ok:
        return self._parse_reply(reply)
    # Non-200: surface a 503 as grid-unavailable, otherwise fail hard.
    self._check_service_availability('delete', reply, ref)
    raise ib_ex.InfobloxCannotDeleteObject(
        response=jsonutils.loads(reply.content),
        ref=ref,
        content=reply.content,
        code=reply.status_code)
@staticmethod
def is_cloud_wapi(wapi_version):
    """Return True if *wapi_version* supports the cloud API.

    A version string whose major number is at least
    CLOUD_WAPI_MAJOR_VERSION enables cloud extensions.

    Args:
        wapi_version (str): WAPI version string, e.g. '2.1'.

    Returns:
        bool: True when the major version is cloud-capable.

    Raises:
        ValueError: if wapi_version is empty or not a string.
    """
    if not (wapi_version and isinstance(wapi_version, six.string_types)):
        raise ValueError("Invalid argument was passed")
    version_match = re.search(r'(\d+)\.(\d+)', wapi_version)
    # No 'major.minor' pattern at all means not cloud-capable.
    return bool(version_match and
                int(version_match.group(1)) >= CLOUD_WAPI_MAJOR_VERSION)
|
infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.get_object | python | def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None | Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L231-L293 | [
"def _validate_obj_type_or_die(obj_type, obj_type_expected=True):\n if not obj_type:\n raise ValueError('NIOS object type cannot be empty.')\n if obj_type_expected and '/' in obj_type:\n raise ValueError('NIOS object type cannot contain slash.')\n",
"def _build_query_params(payload=None, retur... | class Connector(object):
"""Connector stands for interacting with Infoblox NIOS
Defines methods for getting, creating, updating and
removing objects from an Infoblox server instance.
"""
DEFAULT_HEADER = {'Content-type': 'application/json'}
DEFAULT_OPTIONS = {'ssl_verify': False,
'silent_ssl_warnings': False,
'http_request_timeout': 10,
'http_pool_connections': 10,
'http_pool_maxsize': 10,
'max_retries': 3,
'wapi_version': '2.1',
'max_results': None,
'log_api_calls_as_info': False,
'paging': False}
def __init__(self, options):
self._parse_options(options)
self._configure_session()
# urllib has different interface for py27 and py34
try:
self._urlencode = urllib.urlencode
self._quote = urllib.quote
self._urljoin = urlparse.urljoin
except AttributeError:
self._urlencode = urlparse.urlencode
self._quote = urlparse.quote
self._urljoin = urlparse.urljoin
def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
def _configure_session(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=self.http_pool_connections,
pool_maxsize=self.http_pool_maxsize,
max_retries=self.max_retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.auth = (self.username, self.password)
self.session.verify = utils.try_value_to_bool(self.ssl_verify,
strict_mode=False)
if self.silent_ssl_warnings:
urllib3.disable_warnings()
def _construct_url(self, relative_path, query_params=None,
extattrs=None, force_proxy=False):
if query_params is None:
query_params = {}
if extattrs is None:
extattrs = {}
if force_proxy:
query_params['_proxy_search'] = 'GM'
if not relative_path or relative_path[0] == '/':
raise ValueError('Path in request must be relative.')
query = ''
if query_params or extattrs:
query = '?'
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
param = "*%s" % key
value = value['value']
if isinstance(value, list):
for item in value:
attrs_queries.append(self._urlencode({param: item}))
else:
attrs_queries.append(self._urlencode({param: value}))
query += '&'.join(attrs_queries)
if query_params:
if len(query) > 1:
query += '&'
query += self._urlencode(query_params)
base_url = self._urljoin(self.wapi_url,
self._quote(relative_path))
return base_url + query
@staticmethod
def _validate_obj_type_or_die(obj_type, obj_type_expected=True):
if not obj_type:
raise ValueError('NIOS object type cannot be empty.')
if obj_type_expected and '/' in obj_type:
raise ValueError('NIOS object type cannot contain slash.')
@staticmethod
def _validate_authorized(response):
if response.status_code == requests.codes.UNAUTHORIZED:
raise ib_ex.InfobloxBadWAPICredential(response='')
@staticmethod
def _build_query_params(payload=None, return_fields=None,
max_results=None, paging=False):
if payload:
query_params = payload
else:
query_params = dict()
if return_fields:
if 'default' in return_fields:
return_fields.remove('default')
query_params['_return_fields+'] = ','.join(return_fields)
else:
query_params['_return_fields'] = ','.join(return_fields)
if max_results:
query_params['_max_results'] = max_results
if paging:
query_params['_paging'] = 1
query_params['_return_as_object'] = 1
return query_params
def _get_request_options(self, data=None):
opts = dict(timeout=self.http_request_timeout,
headers=self.DEFAULT_HEADER,
verify=self.session.verify)
if data:
opts['data'] = jsonutils.dumps(data)
return opts
@staticmethod
def _parse_reply(request):
"""Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format
"""
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content)
def _log_request(self, type, url, opts):
message = ("Sending %s request to %s with parameters %s",
type, url, opts)
if self.log_api_calls_as_info:
LOG.info(*message)
else:
LOG.debug(*message)
@reraise_neutron_exception
def _handle_get_object(self, obj_type, query_params, extattrs,
proxy_flag=False):
if '_paging' in query_params:
if not ('_max_results' in query_params):
query_params['_max_results'] = 1000
if query_params['_max_results'] < 0:
# Since pagination is enabled with _max_results < 0,
# set _max_results = 1000.
query_params['_max_results'] = 1000
result = []
while True:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
resp = self._get_object(obj_type, url)
if not resp:
return None
if not ('next_page_id' in resp):
result.extend(resp['result'])
query_params.pop('_page_id', None)
return result
else:
query_params['_page_id'] = resp['next_page_id']
result.extend(resp['result'])
else:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
return self._get_object(obj_type, url)
def _get_object(self, obj_type, url):
opts = self._get_request_options()
self._log_request('get', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.get(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
LOG.warning("Failed on object search with url %s: %s",
url, r.content)
return None
return self._parse_reply(r)
@reraise_neutron_exception
def create_object(self, obj_type, payload, return_fields=None):
"""Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException
"""
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r)
def _check_service_availability(self, operation, resp, ref):
if resp.status_code == requests.codes.SERVICE_UNAVAILABLE:
raise ib_ex.InfobloxGridTemporaryUnavailable(
response=resp.content,
operation=operation,
ref=ref,
content=resp.content,
code=resp.status_code)
@reraise_neutron_exception
def call_func(self, func_name, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
query_params['_function'] = func_name
url = self._construct_url(ref, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code not in (requests.codes.CREATED,
requests.codes.ok):
self._check_service_availability('call_func', r, ref)
raise ib_ex.InfobloxFuncException(
response=jsonutils.loads(r.content),
ref=ref,
func_name=func_name,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def update_object(self, ref, payload, return_fields=None):
"""Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException
"""
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def delete_object(self, ref, delete_arguments=None):
"""Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException
"""
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@staticmethod
def is_cloud_wapi(wapi_version):
valid = wapi_version and isinstance(wapi_version, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
version_match = re.search(r'(\d+)\.(\d+)', wapi_version)
if version_match:
if int(version_match.group(1)) >= \
CLOUD_WAPI_MAJOR_VERSION:
return True
return False
|
infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.create_object | python | def create_object(self, obj_type, payload, return_fields=None):
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r) | Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L345-L387 | [
"def safe_json_load(data):\n try:\n return jsonutils.loads(data)\n except ValueError:\n LOG.warn(\"Could not decode reply into json: %s\", data)\n",
"def _construct_url(self, relative_path, query_params=None,\n extattrs=None, force_proxy=False):\n if query_params is None:\... | class Connector(object):
"""Connector stands for interacting with Infoblox NIOS
Defines methods for getting, creating, updating and
removing objects from an Infoblox server instance.
"""
DEFAULT_HEADER = {'Content-type': 'application/json'}
DEFAULT_OPTIONS = {'ssl_verify': False,
'silent_ssl_warnings': False,
'http_request_timeout': 10,
'http_pool_connections': 10,
'http_pool_maxsize': 10,
'max_retries': 3,
'wapi_version': '2.1',
'max_results': None,
'log_api_calls_as_info': False,
'paging': False}
def __init__(self, options):
self._parse_options(options)
self._configure_session()
# urllib has different interface for py27 and py34
try:
self._urlencode = urllib.urlencode
self._quote = urllib.quote
self._urljoin = urlparse.urljoin
except AttributeError:
self._urlencode = urlparse.urlencode
self._quote = urlparse.quote
self._urljoin = urlparse.urljoin
def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
def _configure_session(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=self.http_pool_connections,
pool_maxsize=self.http_pool_maxsize,
max_retries=self.max_retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.auth = (self.username, self.password)
self.session.verify = utils.try_value_to_bool(self.ssl_verify,
strict_mode=False)
if self.silent_ssl_warnings:
urllib3.disable_warnings()
def _construct_url(self, relative_path, query_params=None,
extattrs=None, force_proxy=False):
if query_params is None:
query_params = {}
if extattrs is None:
extattrs = {}
if force_proxy:
query_params['_proxy_search'] = 'GM'
if not relative_path or relative_path[0] == '/':
raise ValueError('Path in request must be relative.')
query = ''
if query_params or extattrs:
query = '?'
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
param = "*%s" % key
value = value['value']
if isinstance(value, list):
for item in value:
attrs_queries.append(self._urlencode({param: item}))
else:
attrs_queries.append(self._urlencode({param: value}))
query += '&'.join(attrs_queries)
if query_params:
if len(query) > 1:
query += '&'
query += self._urlencode(query_params)
base_url = self._urljoin(self.wapi_url,
self._quote(relative_path))
return base_url + query
@staticmethod
def _validate_obj_type_or_die(obj_type, obj_type_expected=True):
if not obj_type:
raise ValueError('NIOS object type cannot be empty.')
if obj_type_expected and '/' in obj_type:
raise ValueError('NIOS object type cannot contain slash.')
@staticmethod
def _validate_authorized(response):
if response.status_code == requests.codes.UNAUTHORIZED:
raise ib_ex.InfobloxBadWAPICredential(response='')
@staticmethod
def _build_query_params(payload=None, return_fields=None,
max_results=None, paging=False):
if payload:
query_params = payload
else:
query_params = dict()
if return_fields:
if 'default' in return_fields:
return_fields.remove('default')
query_params['_return_fields+'] = ','.join(return_fields)
else:
query_params['_return_fields'] = ','.join(return_fields)
if max_results:
query_params['_max_results'] = max_results
if paging:
query_params['_paging'] = 1
query_params['_return_as_object'] = 1
return query_params
def _get_request_options(self, data=None):
opts = dict(timeout=self.http_request_timeout,
headers=self.DEFAULT_HEADER,
verify=self.session.verify)
if data:
opts['data'] = jsonutils.dumps(data)
return opts
@staticmethod
def _parse_reply(request):
"""Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format
"""
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content)
def _log_request(self, type, url, opts):
message = ("Sending %s request to %s with parameters %s",
type, url, opts)
if self.log_api_calls_as_info:
LOG.info(*message)
else:
LOG.debug(*message)
@reraise_neutron_exception
def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
"""Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound
"""
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None
def _handle_get_object(self, obj_type, query_params, extattrs,
proxy_flag=False):
if '_paging' in query_params:
if not ('_max_results' in query_params):
query_params['_max_results'] = 1000
if query_params['_max_results'] < 0:
# Since pagination is enabled with _max_results < 0,
# set _max_results = 1000.
query_params['_max_results'] = 1000
result = []
while True:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
resp = self._get_object(obj_type, url)
if not resp:
return None
if not ('next_page_id' in resp):
result.extend(resp['result'])
query_params.pop('_page_id', None)
return result
else:
query_params['_page_id'] = resp['next_page_id']
result.extend(resp['result'])
else:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
return self._get_object(obj_type, url)
def _get_object(self, obj_type, url):
opts = self._get_request_options()
self._log_request('get', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.get(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
LOG.warning("Failed on object search with url %s: %s",
url, r.content)
return None
return self._parse_reply(r)
@reraise_neutron_exception
def _check_service_availability(self, operation, resp, ref):
if resp.status_code == requests.codes.SERVICE_UNAVAILABLE:
raise ib_ex.InfobloxGridTemporaryUnavailable(
response=resp.content,
operation=operation,
ref=ref,
content=resp.content,
code=resp.status_code)
@reraise_neutron_exception
def call_func(self, func_name, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
query_params['_function'] = func_name
url = self._construct_url(ref, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code not in (requests.codes.CREATED,
requests.codes.ok):
self._check_service_availability('call_func', r, ref)
raise ib_ex.InfobloxFuncException(
response=jsonutils.loads(r.content),
ref=ref,
func_name=func_name,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def update_object(self, ref, payload, return_fields=None):
"""Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException
"""
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def delete_object(self, ref, delete_arguments=None):
"""Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException
"""
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@staticmethod
def is_cloud_wapi(wapi_version):
valid = wapi_version and isinstance(wapi_version, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
version_match = re.search(r'(\d+)\.(\d+)', wapi_version)
if version_match:
if int(version_match.group(1)) >= \
CLOUD_WAPI_MAJOR_VERSION:
return True
return False
|
infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.update_object | python | def update_object(self, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r) | Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L424-L453 | [
"def _construct_url(self, relative_path, query_params=None,\n extattrs=None, force_proxy=False):\n if query_params is None:\n query_params = {}\n if extattrs is None:\n extattrs = {}\n if force_proxy:\n query_params['_proxy_search'] = 'GM'\n\n if not relative_path ... | class Connector(object):
"""Connector stands for interacting with Infoblox NIOS
Defines methods for getting, creating, updating and
removing objects from an Infoblox server instance.
"""
DEFAULT_HEADER = {'Content-type': 'application/json'}
DEFAULT_OPTIONS = {'ssl_verify': False,
'silent_ssl_warnings': False,
'http_request_timeout': 10,
'http_pool_connections': 10,
'http_pool_maxsize': 10,
'max_retries': 3,
'wapi_version': '2.1',
'max_results': None,
'log_api_calls_as_info': False,
'paging': False}
def __init__(self, options):
self._parse_options(options)
self._configure_session()
# urllib has different interface for py27 and py34
try:
self._urlencode = urllib.urlencode
self._quote = urllib.quote
self._urljoin = urlparse.urljoin
except AttributeError:
self._urlencode = urlparse.urlencode
self._quote = urlparse.quote
self._urljoin = urlparse.urljoin
def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
def _configure_session(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=self.http_pool_connections,
pool_maxsize=self.http_pool_maxsize,
max_retries=self.max_retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.auth = (self.username, self.password)
self.session.verify = utils.try_value_to_bool(self.ssl_verify,
strict_mode=False)
if self.silent_ssl_warnings:
urllib3.disable_warnings()
def _construct_url(self, relative_path, query_params=None,
extattrs=None, force_proxy=False):
if query_params is None:
query_params = {}
if extattrs is None:
extattrs = {}
if force_proxy:
query_params['_proxy_search'] = 'GM'
if not relative_path or relative_path[0] == '/':
raise ValueError('Path in request must be relative.')
query = ''
if query_params or extattrs:
query = '?'
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
param = "*%s" % key
value = value['value']
if isinstance(value, list):
for item in value:
attrs_queries.append(self._urlencode({param: item}))
else:
attrs_queries.append(self._urlencode({param: value}))
query += '&'.join(attrs_queries)
if query_params:
if len(query) > 1:
query += '&'
query += self._urlencode(query_params)
base_url = self._urljoin(self.wapi_url,
self._quote(relative_path))
return base_url + query
@staticmethod
def _validate_obj_type_or_die(obj_type, obj_type_expected=True):
if not obj_type:
raise ValueError('NIOS object type cannot be empty.')
if obj_type_expected and '/' in obj_type:
raise ValueError('NIOS object type cannot contain slash.')
@staticmethod
def _validate_authorized(response):
if response.status_code == requests.codes.UNAUTHORIZED:
raise ib_ex.InfobloxBadWAPICredential(response='')
@staticmethod
def _build_query_params(payload=None, return_fields=None,
max_results=None, paging=False):
if payload:
query_params = payload
else:
query_params = dict()
if return_fields:
if 'default' in return_fields:
return_fields.remove('default')
query_params['_return_fields+'] = ','.join(return_fields)
else:
query_params['_return_fields'] = ','.join(return_fields)
if max_results:
query_params['_max_results'] = max_results
if paging:
query_params['_paging'] = 1
query_params['_return_as_object'] = 1
return query_params
def _get_request_options(self, data=None):
opts = dict(timeout=self.http_request_timeout,
headers=self.DEFAULT_HEADER,
verify=self.session.verify)
if data:
opts['data'] = jsonutils.dumps(data)
return opts
@staticmethod
def _parse_reply(request):
"""Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format
"""
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content)
def _log_request(self, type, url, opts):
message = ("Sending %s request to %s with parameters %s",
type, url, opts)
if self.log_api_calls_as_info:
LOG.info(*message)
else:
LOG.debug(*message)
@reraise_neutron_exception
def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
"""Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound
"""
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None
def _handle_get_object(self, obj_type, query_params, extattrs,
proxy_flag=False):
if '_paging' in query_params:
if not ('_max_results' in query_params):
query_params['_max_results'] = 1000
if query_params['_max_results'] < 0:
# Since pagination is enabled with _max_results < 0,
# set _max_results = 1000.
query_params['_max_results'] = 1000
result = []
while True:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
resp = self._get_object(obj_type, url)
if not resp:
return None
if not ('next_page_id' in resp):
result.extend(resp['result'])
query_params.pop('_page_id', None)
return result
else:
query_params['_page_id'] = resp['next_page_id']
result.extend(resp['result'])
else:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
return self._get_object(obj_type, url)
def _get_object(self, obj_type, url):
opts = self._get_request_options()
self._log_request('get', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.get(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
LOG.warning("Failed on object search with url %s: %s",
url, r.content)
return None
return self._parse_reply(r)
@reraise_neutron_exception
def create_object(self, obj_type, payload, return_fields=None):
"""Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException
"""
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r)
def _check_service_availability(self, operation, resp, ref):
if resp.status_code == requests.codes.SERVICE_UNAVAILABLE:
raise ib_ex.InfobloxGridTemporaryUnavailable(
response=resp.content,
operation=operation,
ref=ref,
content=resp.content,
code=resp.status_code)
@reraise_neutron_exception
def call_func(self, func_name, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
query_params['_function'] = func_name
url = self._construct_url(ref, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code not in (requests.codes.CREATED,
requests.codes.ok):
self._check_service_availability('call_func', r, ref)
raise ib_ex.InfobloxFuncException(
response=jsonutils.loads(r.content),
ref=ref,
func_name=func_name,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
@reraise_neutron_exception
def delete_object(self, ref, delete_arguments=None):
"""Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException
"""
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@staticmethod
def is_cloud_wapi(wapi_version):
valid = wapi_version and isinstance(wapi_version, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
version_match = re.search(r'(\d+)\.(\d+)', wapi_version)
if version_match:
if int(version_match.group(1)) >= \
CLOUD_WAPI_MAJOR_VERSION:
return True
return False
|
infobloxopen/infoblox-client | infoblox_client/connector.py | Connector.delete_object | python | def delete_object(self, ref, delete_arguments=None):
opts = self._get_request_options()
if not isinstance(delete_arguments, dict):
delete_arguments = {}
url = self._construct_url(ref, query_params=delete_arguments)
self._log_request('delete', url, opts)
r = self.session.delete(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('delete', r, ref)
raise ib_ex.InfobloxCannotDeleteObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r) | Remove an Infoblox object
Args:
ref (str): Object reference
delete_arguments (dict): Extra delete arguments
Returns:
The object reference of the removed object
Raises:
InfobloxException | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L456-L485 | [
"def _construct_url(self, relative_path, query_params=None,\n extattrs=None, force_proxy=False):\n if query_params is None:\n query_params = {}\n if extattrs is None:\n extattrs = {}\n if force_proxy:\n query_params['_proxy_search'] = 'GM'\n\n if not relative_path ... | class Connector(object):
"""Connector stands for interacting with Infoblox NIOS
Defines methods for getting, creating, updating and
removing objects from an Infoblox server instance.
"""
DEFAULT_HEADER = {'Content-type': 'application/json'}
DEFAULT_OPTIONS = {'ssl_verify': False,
'silent_ssl_warnings': False,
'http_request_timeout': 10,
'http_pool_connections': 10,
'http_pool_maxsize': 10,
'max_retries': 3,
'wapi_version': '2.1',
'max_results': None,
'log_api_calls_as_info': False,
'paging': False}
def __init__(self, options):
self._parse_options(options)
self._configure_session()
# urllib has different interface for py27 and py34
try:
self._urlencode = urllib.urlencode
self._quote = urllib.quote
self._urljoin = urlparse.urljoin
except AttributeError:
self._urlencode = urlparse.urlencode
self._quote = urlparse.quote
self._urljoin = urlparse.urljoin
def _parse_options(self, options):
"""Copy needed options to self"""
attributes = ('host', 'wapi_version', 'username', 'password',
'ssl_verify', 'http_request_timeout', 'max_retries',
'http_pool_connections', 'http_pool_maxsize',
'silent_ssl_warnings', 'log_api_calls_as_info',
'max_results', 'paging')
for attr in attributes:
if isinstance(options, dict) and attr in options:
setattr(self, attr, options[attr])
elif hasattr(options, attr):
value = getattr(options, attr)
setattr(self, attr, value)
elif attr in self.DEFAULT_OPTIONS:
setattr(self, attr, self.DEFAULT_OPTIONS[attr])
else:
msg = "WAPI config error. Option %s is not defined" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
for attr in ('host', 'username', 'password'):
if not getattr(self, attr):
msg = "WAPI config error. Option %s can not be blank" % attr
raise ib_ex.InfobloxConfigException(msg=msg)
self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
self.wapi_version)
self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
def _configure_session(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=self.http_pool_connections,
pool_maxsize=self.http_pool_maxsize,
max_retries=self.max_retries)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.auth = (self.username, self.password)
self.session.verify = utils.try_value_to_bool(self.ssl_verify,
strict_mode=False)
if self.silent_ssl_warnings:
urllib3.disable_warnings()
def _construct_url(self, relative_path, query_params=None,
extattrs=None, force_proxy=False):
if query_params is None:
query_params = {}
if extattrs is None:
extattrs = {}
if force_proxy:
query_params['_proxy_search'] = 'GM'
if not relative_path or relative_path[0] == '/':
raise ValueError('Path in request must be relative.')
query = ''
if query_params or extattrs:
query = '?'
if extattrs:
attrs_queries = []
for key, value in extattrs.items():
param = "*%s" % key
value = value['value']
if isinstance(value, list):
for item in value:
attrs_queries.append(self._urlencode({param: item}))
else:
attrs_queries.append(self._urlencode({param: value}))
query += '&'.join(attrs_queries)
if query_params:
if len(query) > 1:
query += '&'
query += self._urlencode(query_params)
base_url = self._urljoin(self.wapi_url,
self._quote(relative_path))
return base_url + query
@staticmethod
def _validate_obj_type_or_die(obj_type, obj_type_expected=True):
if not obj_type:
raise ValueError('NIOS object type cannot be empty.')
if obj_type_expected and '/' in obj_type:
raise ValueError('NIOS object type cannot contain slash.')
@staticmethod
def _validate_authorized(response):
if response.status_code == requests.codes.UNAUTHORIZED:
raise ib_ex.InfobloxBadWAPICredential(response='')
@staticmethod
def _build_query_params(payload=None, return_fields=None,
max_results=None, paging=False):
if payload:
query_params = payload
else:
query_params = dict()
if return_fields:
if 'default' in return_fields:
return_fields.remove('default')
query_params['_return_fields+'] = ','.join(return_fields)
else:
query_params['_return_fields'] = ','.join(return_fields)
if max_results:
query_params['_max_results'] = max_results
if paging:
query_params['_paging'] = 1
query_params['_return_as_object'] = 1
return query_params
def _get_request_options(self, data=None):
opts = dict(timeout=self.http_request_timeout,
headers=self.DEFAULT_HEADER,
verify=self.session.verify)
if data:
opts['data'] = jsonutils.dumps(data)
return opts
@staticmethod
def _parse_reply(request):
"""Tries to parse reply from NIOS.
Raises exception with content if reply is not in json format
"""
try:
return jsonutils.loads(request.content)
except ValueError:
raise ib_ex.InfobloxConnectionError(reason=request.content)
def _log_request(self, type, url, opts):
message = ("Sending %s request to %s with parameters %s",
type, url, opts)
if self.log_api_calls_as_info:
LOG.info(*message)
else:
LOG.debug(*message)
@reraise_neutron_exception
def get_object(self, obj_type, payload=None, return_fields=None,
extattrs=None, force_proxy=False, max_results=None,
paging=False):
"""Retrieve a list of Infoblox objects of type 'obj_type'
Some get requests like 'ipv4address' should be always
proxied to GM on Hellfire
If request is cloud and proxy is not forced yet,
then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM
Args:
obj_type (str): Infoblox object type, e.g. 'network',
'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
extattrs (dict): List of Extensible Attributes
force_proxy (bool): Set _proxy_search flag
to process requests on GM
max_results (int): Maximum number of objects to be returned.
If set to a negative number the appliance will return an error
when the number of returned objects would exceed the setting.
The default is -1000. If this is set to a positive number,
the results will be truncated when necessary.
paging (bool): Enables paging to wapi calls if paging = True,
it uses _max_results to set paging size of the wapi calls.
If _max_results is negative it will take paging size as 1000.
Returns:
A list of the Infoblox objects requested
Raises:
InfobloxObjectNotFound
"""
self._validate_obj_type_or_die(obj_type, obj_type_expected=False)
# max_results passed to get_object has priority over
# one defined as connector option
if max_results is None and self.max_results:
max_results = self.max_results
if paging is False and self.paging:
paging = self.paging
query_params = self._build_query_params(payload=payload,
return_fields=return_fields,
max_results=max_results,
paging=paging)
# Clear proxy flag if wapi version is too old (non-cloud)
proxy_flag = self.cloud_api_enabled and force_proxy
ib_object = self._handle_get_object(obj_type, query_params, extattrs,
proxy_flag)
if ib_object:
return ib_object
# Do second get call with force_proxy if not done yet
if self.cloud_api_enabled and not force_proxy:
ib_object = self._handle_get_object(obj_type, query_params,
extattrs, proxy_flag=True)
if ib_object:
return ib_object
return None
def _handle_get_object(self, obj_type, query_params, extattrs,
proxy_flag=False):
if '_paging' in query_params:
if not ('_max_results' in query_params):
query_params['_max_results'] = 1000
if query_params['_max_results'] < 0:
# Since pagination is enabled with _max_results < 0,
# set _max_results = 1000.
query_params['_max_results'] = 1000
result = []
while True:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
resp = self._get_object(obj_type, url)
if not resp:
return None
if not ('next_page_id' in resp):
result.extend(resp['result'])
query_params.pop('_page_id', None)
return result
else:
query_params['_page_id'] = resp['next_page_id']
result.extend(resp['result'])
else:
url = self._construct_url(obj_type, query_params, extattrs,
force_proxy=proxy_flag)
return self._get_object(obj_type, url)
def _get_object(self, obj_type, url):
opts = self._get_request_options()
self._log_request('get', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.get(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
LOG.warning("Failed on object search with url %s: %s",
url, r.content)
return None
return self._parse_reply(r)
@reraise_neutron_exception
def create_object(self, obj_type, payload, return_fields=None):
"""Create an Infoblox object of type 'obj_type'
Args:
obj_type (str): Infoblox object type,
e.g. 'network', 'range', etc.
payload (dict): Payload with data to send
return_fields (list): List of fields to be returned
Returns:
The object reference of the newly create object
Raises:
InfobloxException
"""
self._validate_obj_type_or_die(obj_type)
query_params = self._build_query_params(return_fields=return_fields)
url = self._construct_url(obj_type, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
if(self.session.cookies):
# the first 'get' or 'post' action will generate a cookie
# after that, we don't need to re-authenticate
self.session.auth = None
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.CREATED:
response = utils.safe_json_load(r.content)
already_assigned = 'is assigned to another network view'
if response and already_assigned in response.get('text'):
exception = ib_ex.InfobloxMemberAlreadyAssigned
else:
exception = ib_ex.InfobloxCannotCreateObject
raise exception(
response=response,
obj_type=obj_type,
content=r.content,
args=payload,
code=r.status_code)
return self._parse_reply(r)
def _check_service_availability(self, operation, resp, ref):
if resp.status_code == requests.codes.SERVICE_UNAVAILABLE:
raise ib_ex.InfobloxGridTemporaryUnavailable(
response=resp.content,
operation=operation,
ref=ref,
content=resp.content,
code=resp.status_code)
@reraise_neutron_exception
def call_func(self, func_name, ref, payload, return_fields=None):
query_params = self._build_query_params(return_fields=return_fields)
query_params['_function'] = func_name
url = self._construct_url(ref, query_params)
opts = self._get_request_options(data=payload)
self._log_request('post', url, opts)
r = self.session.post(url, **opts)
self._validate_authorized(r)
if r.status_code not in (requests.codes.CREATED,
requests.codes.ok):
self._check_service_availability('call_func', r, ref)
raise ib_ex.InfobloxFuncException(
response=jsonutils.loads(r.content),
ref=ref,
func_name=func_name,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
def update_object(self, ref, payload, return_fields=None):
"""Update an Infoblox object
Args:
ref (str): Infoblox object reference
payload (dict): Payload with data to send
Returns:
The object reference of the updated object
Raises:
InfobloxException
"""
query_params = self._build_query_params(return_fields=return_fields)
opts = self._get_request_options(data=payload)
url = self._construct_url(ref, query_params)
self._log_request('put', url, opts)
r = self.session.put(url, **opts)
self._validate_authorized(r)
if r.status_code != requests.codes.ok:
self._check_service_availability('update', r, ref)
raise ib_ex.InfobloxCannotUpdateObject(
response=jsonutils.loads(r.content),
ref=ref,
content=r.content,
code=r.status_code)
return self._parse_reply(r)
@reraise_neutron_exception
@staticmethod
def is_cloud_wapi(wapi_version):
valid = wapi_version and isinstance(wapi_version, six.string_types)
if not valid:
raise ValueError("Invalid argument was passed")
version_match = re.search(r'(\d+)\.(\d+)', wapi_version)
if version_match:
if int(version_match.group(1)) >= \
CLOUD_WAPI_MAJOR_VERSION:
return True
return False
|
infobloxopen/infoblox-client | infoblox_client/objects.py | BaseObject._remap_fields | python | def _remap_fields(cls, kwargs):
mapped = {}
for key in kwargs:
if key in cls._remap:
mapped[cls._remap[key]] = kwargs[key]
else:
mapped[key] = kwargs[key]
return mapped | Map fields from kwargs into dict acceptable by NIOS | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L87-L95 | null | class BaseObject(object):
"""Base class that provides minimal new object model interface
This class add next features to objects:
- initialize public instance variables with None for fields
defined in '_fields' and '_shadow_fields'
- accept fields from '_fields' and '_shadow_fields' as a parameter on init
- dynamically remap one fields into another using _remap dict,
mapping is in effect on all stages (on init, getter and setter)
- provides nice object representation that contains class
and not None object fields (useful in python interpretter)
"""
_fields = []
_shadow_fields = []
_remap = {}
_infoblox_type = None
def __init__(self, **kwargs):
mapped_args = self._remap_fields(kwargs)
for field in self._fields + self._shadow_fields:
if field in mapped_args:
setattr(self, field, mapped_args[field])
else:
# Init all not initialized fields with None
if not hasattr(self, field):
setattr(self, field, None)
def __getattr__(self, name):
# Map aliases into real fields
if name in self._remap:
return getattr(self, self._remap[name])
else:
# Default behaviour
raise AttributeError
def __setattr__(self, name, value):
if name in self._remap:
return setattr(self, self._remap[name], value)
else:
super(BaseObject, self).__setattr__(name, value)
def __eq__(self, other):
if isinstance(other, self.__class__):
for field in self._fields:
if getattr(self, field) != getattr(other, field):
return False
return True
return False
def __repr__(self):
data = {field: getattr(self, field)
for field in self._fields + self._shadow_fields
if hasattr(self, field) and getattr(self, field) is not None}
data_str = ', '.join(
"{0}=\"{1}\"".format(key, data[key]) for key in data)
return "{0}: {1}".format(self.__class__.__name__, data_str)
@classmethod
@classmethod
def from_dict(cls, ip_dict):
return cls(**ip_dict)
def to_dict(self):
return {field: getattr(self, field) for field in self._fields
if getattr(self, field, None) is not None}
@property
def ref(self):
if hasattr(self, '_ref'):
return self._ref
|
infobloxopen/infoblox-client | infoblox_client/objects.py | EA.from_dict | python | def from_dict(cls, eas_from_nios):
if not eas_from_nios:
return
return cls({name: cls._process_value(ib_utils.try_value_to_bool,
eas_from_nios[name]['value'])
for name in eas_from_nios}) | Converts extensible attributes from the NIOS reply. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L141-L147 | null | class EA(object):
"""Extensible Attributes
This class represents extensible attributes (EA).
Converts EAs into format suitable for NIOS (to_dict)
and builds EA class from NIOS reply (from_dict).
"""
def __init__(self, ea_dict=None):
"""Optionally accept EAs as a dict on init.
Expected EA format is {ea_name: ea_value}
"""
if ea_dict is None:
ea_dict = {}
self._ea_dict = ea_dict
def __repr__(self):
eas = ()
if self._ea_dict:
eas = ("{0}={1}".format(name, self._ea_dict[name])
for name in self._ea_dict)
return "EAs:{0}".format(','.join(eas))
@property
def ea_dict(self):
"""Returns dict with EAs in {ea_name: ea_value} format."""
return self._ea_dict.copy()
@classmethod
def to_dict(self):
"""Converts extensible attributes into the format suitable for NIOS."""
return {name: {'value': self._process_value(str, value)}
for name, value in self._ea_dict.items()
if not (value is None or value == "" or value == [])}
@staticmethod
def _process_value(func, value):
"""Applies processing method for value or each element in it.
:param func: method to be called with value
:param value: value to process
:return: if 'value' is list/tupe, returns iterable with func results,
else func result is returned
"""
if isinstance(value, (list, tuple)):
return [func(item) for item in value]
return func(value)
def get(self, name, default=None):
"""Return value of requested EA."""
return self._ea_dict.get(name, default)
def set(self, name, value):
"""Set value of requested EA."""
self._ea_dict[name] = value
|
infobloxopen/infoblox-client | infoblox_client/objects.py | EA.to_dict | python | def to_dict(self):
return {name: {'value': self._process_value(str, value)}
for name, value in self._ea_dict.items()
if not (value is None or value == "" or value == [])} | Converts extensible attributes into the format suitable for NIOS. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L149-L153 | null | class EA(object):
"""Extensible Attributes
This class represents extensible attributes (EA).
Converts EAs into format suitable for NIOS (to_dict)
and builds EA class from NIOS reply (from_dict).
"""
def __init__(self, ea_dict=None):
"""Optionally accept EAs as a dict on init.
Expected EA format is {ea_name: ea_value}
"""
if ea_dict is None:
ea_dict = {}
self._ea_dict = ea_dict
def __repr__(self):
eas = ()
if self._ea_dict:
eas = ("{0}={1}".format(name, self._ea_dict[name])
for name in self._ea_dict)
return "EAs:{0}".format(','.join(eas))
@property
def ea_dict(self):
"""Returns dict with EAs in {ea_name: ea_value} format."""
return self._ea_dict.copy()
@classmethod
def from_dict(cls, eas_from_nios):
"""Converts extensible attributes from the NIOS reply."""
if not eas_from_nios:
return
return cls({name: cls._process_value(ib_utils.try_value_to_bool,
eas_from_nios[name]['value'])
for name in eas_from_nios})
@staticmethod
def _process_value(func, value):
"""Applies processing method for value or each element in it.
:param func: method to be called with value
:param value: value to process
:return: if 'value' is list/tupe, returns iterable with func results,
else func result is returned
"""
if isinstance(value, (list, tuple)):
return [func(item) for item in value]
return func(value)
def get(self, name, default=None):
"""Return value of requested EA."""
return self._ea_dict.get(name, default)
def set(self, name, value):
"""Set value of requested EA."""
self._ea_dict[name] = value
|
infobloxopen/infoblox-client | infoblox_client/objects.py | EA._process_value | python | def _process_value(func, value):
if isinstance(value, (list, tuple)):
return [func(item) for item in value]
return func(value) | Applies processing method for value or each element in it.
:param func: method to be called with value
:param value: value to process
:return: if 'value' is list/tupe, returns iterable with func results,
else func result is returned | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L156-L166 | null | class EA(object):
"""Extensible Attributes
This class represents extensible attributes (EA).
Converts EAs into format suitable for NIOS (to_dict)
and builds EA class from NIOS reply (from_dict).
"""
def __init__(self, ea_dict=None):
"""Optionally accept EAs as a dict on init.
Expected EA format is {ea_name: ea_value}
"""
if ea_dict is None:
ea_dict = {}
self._ea_dict = ea_dict
def __repr__(self):
eas = ()
if self._ea_dict:
eas = ("{0}={1}".format(name, self._ea_dict[name])
for name in self._ea_dict)
return "EAs:{0}".format(','.join(eas))
@property
def ea_dict(self):
"""Returns dict with EAs in {ea_name: ea_value} format."""
return self._ea_dict.copy()
@classmethod
def from_dict(cls, eas_from_nios):
"""Converts extensible attributes from the NIOS reply."""
if not eas_from_nios:
return
return cls({name: cls._process_value(ib_utils.try_value_to_bool,
eas_from_nios[name]['value'])
for name in eas_from_nios})
def to_dict(self):
"""Converts extensible attributes into the format suitable for NIOS."""
return {name: {'value': self._process_value(str, value)}
for name, value in self._ea_dict.items()
if not (value is None or value == "" or value == [])}
@staticmethod
def get(self, name, default=None):
"""Return value of requested EA."""
return self._ea_dict.get(name, default)
def set(self, name, value):
"""Set value of requested EA."""
self._ea_dict[name] = value
|
infobloxopen/infoblox-client | infoblox_client/objects.py | InfobloxObject.from_dict | python | def from_dict(cls, connector, ip_dict):
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict) | Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L243-L256 | null | class InfobloxObject(BaseObject):
"""Base class for all Infoblox related objects
_fields - fields that represents NIOS object (WAPI fields) and
are sent to NIOS on object creation
_search_for_update_fields - field/fields used to find an object during an
update operation. this should be the smallest number of fields that
uniquely identify an object
_all_searchable_fields - all fields that can be used to find object on NIOS
side
_updateable_search_fields - fields that can be used to find object on
NIOS side, but also can be changed, so has to be sent on update.
_shadow_fields - fields that object usually has but they should not
be sent to NIOS. These fields can be received from NIOS. Examples:
[_ref, is_default]
_return_fields - fields requested to be returned from NIOS side
if object is found/created
_infoblox_type - string representing wapi type of described object
_remap - dict that maps user faced names into internal
representation (_fields)
_custom_field_processing - dict that define rules (lambda) for building
objects from data returned by NIOS side.
Expected to be redefined in child class as needed,
_custom_field_processing has priority over _global_field_processing,
so can redefine for child class global rules
defined in _global_field_processing.
_global_field_processing - almost the same as _custom_field_processing,
but defines rules for building field on global level.
Fields defined in this dict will be processed in the same way in all
child classes. Is not expected to be redefined in child classes.
_ip_version - ip version of the object, used to mark version
specific classes. Value other than None indicates that
no versioned class lookup needed.
"""
_fields = []
_search_for_update_fields = []
_all_searchable_fields = []
_updateable_search_fields = []
_shadow_fields = []
_infoblox_type = None
_remap = {}
_return_fields = []
_custom_field_processing = {}
_global_field_processing = {'extattrs': EA.from_dict}
_ip_version = None
def __new__(cls, connector, **kwargs):
return super(InfobloxObject,
cls).__new__(cls.get_class_from_args(kwargs))
def __init__(self, connector, **kwargs):
self.connector = connector
super(InfobloxObject, self).__init__(**kwargs)
def update_from_dict(self, ip_dict, only_ref=False):
if only_ref:
self._ref = ip_dict['_ref']
return
mapped_args = self._remap_fields(ip_dict)
for field in self._fields + self._shadow_fields:
if field in ip_dict:
setattr(self, field, mapped_args[field])
@classmethod
@staticmethod
def value_to_dict(value):
return value.to_dict() if hasattr(value, 'to_dict') else value
def field_to_dict(self, field):
"""Read field value and converts to dict if possible"""
value = getattr(self, field)
if isinstance(value, (list, tuple)):
return [self.value_to_dict(val) for val in value]
return self.value_to_dict(value)
def to_dict(self, search_fields=None):
"""Builds dict without None object fields"""
fields = self._fields
if search_fields == 'update':
fields = self._search_for_update_fields
elif search_fields == 'all':
fields = self._all_searchable_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_for_update_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None}
@staticmethod
def _object_from_reply(parse_class, connector, reply):
if not reply:
return None
if isinstance(reply, dict):
return parse_class.from_dict(connector, reply)
# If no return fields were requested reply contains only string
# with reference to object
return_dict = {'_ref': reply}
return parse_class.from_dict(connector, return_dict)
@classmethod
def create_check_exists(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
# obj_created is used to check if object is being created or
# pre-exists. obj_created is True if object is not pre-exists
# and getting created with this function call
obj_created = False
local_obj = cls(connector, **kwargs)
if check_if_exists:
if local_obj.fetch(only_ref=True):
LOG.info(("Infoblox %(obj_type)s already exists: "
"%(ib_obj)s"),
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
if not update_if_exists:
return local_obj, obj_created
reply = None
if not local_obj.ref:
reply = connector.create_object(local_obj.infoblox_type,
local_obj.to_dict(),
local_obj.return_fields)
obj_created = True
LOG.info("Infoblox %(obj_type)s was created: %(ib_obj)s",
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
elif update_if_exists:
update_fields = local_obj.to_dict(search_fields='exclude')
reply = connector.update_object(local_obj.ref,
update_fields,
local_obj.return_fields)
LOG.info('Infoblox object was updated: %s', local_obj.ref)
return cls._object_from_reply(local_obj, connector, reply), obj_created
@classmethod
def create(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
ib_object, _ = (
cls.create_check_exists(connector,
check_if_exists=check_if_exists,
update_if_exists=update_if_exists,
**kwargs))
return ib_object
@classmethod
def _search(cls, connector, return_fields=None,
search_extattrs=None, force_proxy=False,
max_results=None, **kwargs):
ib_obj_for_search = cls(connector, **kwargs)
search_dict = ib_obj_for_search.to_dict(search_fields='all')
if return_fields is None and ib_obj_for_search.return_fields:
return_fields = ib_obj_for_search.return_fields
# allow search_extattrs to be instance of EA class
# or dict in NIOS format
extattrs = search_extattrs
if hasattr(search_extattrs, 'to_dict'):
extattrs = search_extattrs.to_dict()
reply = connector.get_object(ib_obj_for_search.infoblox_type,
search_dict,
return_fields=return_fields,
extattrs=extattrs,
force_proxy=force_proxy,
max_results=max_results)
return reply, ib_obj_for_search
@classmethod
def search(cls, connector, **kwargs):
ib_obj, parse_class = cls._search(
connector, **kwargs)
if ib_obj:
return parse_class.from_dict(connector, ib_obj[0])
@classmethod
def search_all(cls, connector, **kwargs):
ib_objects, parsing_class = cls._search(
connector, **kwargs)
if ib_objects:
return [parsing_class.from_dict(connector, obj)
for obj in ib_objects]
return []
def fetch(self, only_ref=False):
"""Fetch object from NIOS by _ref or searchfields
Update existent object with fields returned from NIOS
Return True on successful object fetch
"""
if self.ref:
reply = self.connector.get_object(
self.ref, return_fields=self.return_fields)
if reply:
self.update_from_dict(reply)
return True
search_dict = self.to_dict(search_fields='update')
return_fields = [] if only_ref else self.return_fields
reply = self.connector.get_object(self.infoblox_type,
search_dict,
return_fields=return_fields)
if reply:
self.update_from_dict(reply[0], only_ref=only_ref)
return True
return False
def update(self):
update_fields = self.to_dict(search_fields='exclude')
ib_obj = self.connector.update_object(self.ref,
update_fields,
self.return_fields)
LOG.info('Infoblox object was updated: %s', self.ref)
return self._object_from_reply(self, self.connector, ib_obj)
def delete(self):
try:
self.connector.delete_object(self.ref)
except ib_ex.InfobloxCannotDeleteObject as e:
LOG.info("Failed to delete an object: %s", e)
@property
def infoblox_type(self):
return self._infoblox_type
@property
def return_fields(self):
return self._return_fields
@property
def ip_version(self):
return self._ip_version
@classmethod
def get_class_from_args(cls, kwargs):
# skip processing if cls already versioned class
if cls._ip_version:
return cls
for field in ['ip', 'cidr', 'start_ip', 'ip_address', 'network',
'start_addr', 'end_addr']:
if field in kwargs:
if ib_utils.determine_ip_version(kwargs[field]) == 6:
return cls.get_v6_class()
else:
return cls.get_v4_class()
# fallback to IPv4 object if find nothing
return cls.get_v4_class()
@classmethod
def get_v4_class(cls):
return cls
@classmethod
def get_v6_class(cls):
return cls
|
infobloxopen/infoblox-client | infoblox_client/objects.py | InfobloxObject.field_to_dict | python | def field_to_dict(self, field):
value = getattr(self, field)
if isinstance(value, (list, tuple)):
return [self.value_to_dict(val) for val in value]
return self.value_to_dict(value) | Read field value and converts to dict if possible | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L262-L267 | [
"def value_to_dict(value):\n return value.to_dict() if hasattr(value, 'to_dict') else value\n"
] | class InfobloxObject(BaseObject):
"""Base class for all Infoblox related objects
_fields - fields that represents NIOS object (WAPI fields) and
are sent to NIOS on object creation
_search_for_update_fields - field/fields used to find an object during an
update operation. this should be the smallest number of fields that
uniquely identify an object
_all_searchable_fields - all fields that can be used to find object on NIOS
side
_updateable_search_fields - fields that can be used to find object on
NIOS side, but also can be changed, so has to be sent on update.
_shadow_fields - fields that object usually has but they should not
be sent to NIOS. These fields can be received from NIOS. Examples:
[_ref, is_default]
_return_fields - fields requested to be returned from NIOS side
if object is found/created
_infoblox_type - string representing wapi type of described object
_remap - dict that maps user faced names into internal
representation (_fields)
_custom_field_processing - dict that define rules (lambda) for building
objects from data returned by NIOS side.
Expected to be redefined in child class as needed,
_custom_field_processing has priority over _global_field_processing,
so can redefine for child class global rules
defined in _global_field_processing.
_global_field_processing - almost the same as _custom_field_processing,
but defines rules for building field on global level.
Fields defined in this dict will be processed in the same way in all
child classes. Is not expected to be redefined in child classes.
_ip_version - ip version of the object, used to mark version
specific classes. Value other than None indicates that
no versioned class lookup needed.
"""
_fields = []
_search_for_update_fields = []
_all_searchable_fields = []
_updateable_search_fields = []
_shadow_fields = []
_infoblox_type = None
_remap = {}
_return_fields = []
_custom_field_processing = {}
_global_field_processing = {'extattrs': EA.from_dict}
_ip_version = None
def __new__(cls, connector, **kwargs):
return super(InfobloxObject,
cls).__new__(cls.get_class_from_args(kwargs))
def __init__(self, connector, **kwargs):
self.connector = connector
super(InfobloxObject, self).__init__(**kwargs)
def update_from_dict(self, ip_dict, only_ref=False):
if only_ref:
self._ref = ip_dict['_ref']
return
mapped_args = self._remap_fields(ip_dict)
for field in self._fields + self._shadow_fields:
if field in ip_dict:
setattr(self, field, mapped_args[field])
@classmethod
def from_dict(cls, connector, ip_dict):
"""Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked.
"""
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict)
@staticmethod
def value_to_dict(value):
return value.to_dict() if hasattr(value, 'to_dict') else value
def to_dict(self, search_fields=None):
"""Builds dict without None object fields"""
fields = self._fields
if search_fields == 'update':
fields = self._search_for_update_fields
elif search_fields == 'all':
fields = self._all_searchable_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_for_update_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None}
@staticmethod
def _object_from_reply(parse_class, connector, reply):
if not reply:
return None
if isinstance(reply, dict):
return parse_class.from_dict(connector, reply)
# If no return fields were requested reply contains only string
# with reference to object
return_dict = {'_ref': reply}
return parse_class.from_dict(connector, return_dict)
@classmethod
def create_check_exists(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
# obj_created is used to check if object is being created or
# pre-exists. obj_created is True if object is not pre-exists
# and getting created with this function call
obj_created = False
local_obj = cls(connector, **kwargs)
if check_if_exists:
if local_obj.fetch(only_ref=True):
LOG.info(("Infoblox %(obj_type)s already exists: "
"%(ib_obj)s"),
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
if not update_if_exists:
return local_obj, obj_created
reply = None
if not local_obj.ref:
reply = connector.create_object(local_obj.infoblox_type,
local_obj.to_dict(),
local_obj.return_fields)
obj_created = True
LOG.info("Infoblox %(obj_type)s was created: %(ib_obj)s",
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
elif update_if_exists:
update_fields = local_obj.to_dict(search_fields='exclude')
reply = connector.update_object(local_obj.ref,
update_fields,
local_obj.return_fields)
LOG.info('Infoblox object was updated: %s', local_obj.ref)
return cls._object_from_reply(local_obj, connector, reply), obj_created
@classmethod
def create(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
ib_object, _ = (
cls.create_check_exists(connector,
check_if_exists=check_if_exists,
update_if_exists=update_if_exists,
**kwargs))
return ib_object
@classmethod
def _search(cls, connector, return_fields=None,
search_extattrs=None, force_proxy=False,
max_results=None, **kwargs):
ib_obj_for_search = cls(connector, **kwargs)
search_dict = ib_obj_for_search.to_dict(search_fields='all')
if return_fields is None and ib_obj_for_search.return_fields:
return_fields = ib_obj_for_search.return_fields
# allow search_extattrs to be instance of EA class
# or dict in NIOS format
extattrs = search_extattrs
if hasattr(search_extattrs, 'to_dict'):
extattrs = search_extattrs.to_dict()
reply = connector.get_object(ib_obj_for_search.infoblox_type,
search_dict,
return_fields=return_fields,
extattrs=extattrs,
force_proxy=force_proxy,
max_results=max_results)
return reply, ib_obj_for_search
@classmethod
def search(cls, connector, **kwargs):
ib_obj, parse_class = cls._search(
connector, **kwargs)
if ib_obj:
return parse_class.from_dict(connector, ib_obj[0])
@classmethod
def search_all(cls, connector, **kwargs):
ib_objects, parsing_class = cls._search(
connector, **kwargs)
if ib_objects:
return [parsing_class.from_dict(connector, obj)
for obj in ib_objects]
return []
def fetch(self, only_ref=False):
"""Fetch object from NIOS by _ref or searchfields
Update existent object with fields returned from NIOS
Return True on successful object fetch
"""
if self.ref:
reply = self.connector.get_object(
self.ref, return_fields=self.return_fields)
if reply:
self.update_from_dict(reply)
return True
search_dict = self.to_dict(search_fields='update')
return_fields = [] if only_ref else self.return_fields
reply = self.connector.get_object(self.infoblox_type,
search_dict,
return_fields=return_fields)
if reply:
self.update_from_dict(reply[0], only_ref=only_ref)
return True
return False
def update(self):
update_fields = self.to_dict(search_fields='exclude')
ib_obj = self.connector.update_object(self.ref,
update_fields,
self.return_fields)
LOG.info('Infoblox object was updated: %s', self.ref)
return self._object_from_reply(self, self.connector, ib_obj)
def delete(self):
try:
self.connector.delete_object(self.ref)
except ib_ex.InfobloxCannotDeleteObject as e:
LOG.info("Failed to delete an object: %s", e)
@property
def infoblox_type(self):
return self._infoblox_type
@property
def return_fields(self):
return self._return_fields
@property
def ip_version(self):
return self._ip_version
@classmethod
def get_class_from_args(cls, kwargs):
# skip processing if cls already versioned class
if cls._ip_version:
return cls
for field in ['ip', 'cidr', 'start_ip', 'ip_address', 'network',
'start_addr', 'end_addr']:
if field in kwargs:
if ib_utils.determine_ip_version(kwargs[field]) == 6:
return cls.get_v6_class()
else:
return cls.get_v4_class()
# fallback to IPv4 object if find nothing
return cls.get_v4_class()
@classmethod
def get_v4_class(cls):
return cls
@classmethod
def get_v6_class(cls):
return cls
|
infobloxopen/infoblox-client | infoblox_client/objects.py | InfobloxObject.to_dict | python | def to_dict(self, search_fields=None):
fields = self._fields
if search_fields == 'update':
fields = self._search_for_update_fields
elif search_fields == 'all':
fields = self._all_searchable_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_for_update_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None} | Builds dict without None object fields | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L269-L284 | null | class InfobloxObject(BaseObject):
"""Base class for all Infoblox related objects
_fields - fields that represents NIOS object (WAPI fields) and
are sent to NIOS on object creation
_search_for_update_fields - field/fields used to find an object during an
update operation. this should be the smallest number of fields that
uniquely identify an object
_all_searchable_fields - all fields that can be used to find object on NIOS
side
_updateable_search_fields - fields that can be used to find object on
NIOS side, but also can be changed, so has to be sent on update.
_shadow_fields - fields that object usually has but they should not
be sent to NIOS. These fields can be received from NIOS. Examples:
[_ref, is_default]
_return_fields - fields requested to be returned from NIOS side
if object is found/created
_infoblox_type - string representing wapi type of described object
_remap - dict that maps user faced names into internal
representation (_fields)
_custom_field_processing - dict that define rules (lambda) for building
objects from data returned by NIOS side.
Expected to be redefined in child class as needed,
_custom_field_processing has priority over _global_field_processing,
so can redefine for child class global rules
defined in _global_field_processing.
_global_field_processing - almost the same as _custom_field_processing,
but defines rules for building field on global level.
Fields defined in this dict will be processed in the same way in all
child classes. Is not expected to be redefined in child classes.
_ip_version - ip version of the object, used to mark version
specific classes. Value other than None indicates that
no versioned class lookup needed.
"""
_fields = []
_search_for_update_fields = []
_all_searchable_fields = []
_updateable_search_fields = []
_shadow_fields = []
_infoblox_type = None
_remap = {}
_return_fields = []
_custom_field_processing = {}
_global_field_processing = {'extattrs': EA.from_dict}
_ip_version = None
def __new__(cls, connector, **kwargs):
return super(InfobloxObject,
cls).__new__(cls.get_class_from_args(kwargs))
def __init__(self, connector, **kwargs):
self.connector = connector
super(InfobloxObject, self).__init__(**kwargs)
def update_from_dict(self, ip_dict, only_ref=False):
if only_ref:
self._ref = ip_dict['_ref']
return
mapped_args = self._remap_fields(ip_dict)
for field in self._fields + self._shadow_fields:
if field in ip_dict:
setattr(self, field, mapped_args[field])
@classmethod
def from_dict(cls, connector, ip_dict):
"""Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked.
"""
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict)
@staticmethod
def value_to_dict(value):
return value.to_dict() if hasattr(value, 'to_dict') else value
def field_to_dict(self, field):
"""Read field value and converts to dict if possible"""
value = getattr(self, field)
if isinstance(value, (list, tuple)):
return [self.value_to_dict(val) for val in value]
return self.value_to_dict(value)
@staticmethod
def _object_from_reply(parse_class, connector, reply):
if not reply:
return None
if isinstance(reply, dict):
return parse_class.from_dict(connector, reply)
# If no return fields were requested reply contains only string
# with reference to object
return_dict = {'_ref': reply}
return parse_class.from_dict(connector, return_dict)
@classmethod
def create_check_exists(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
# obj_created is used to check if object is being created or
# pre-exists. obj_created is True if object is not pre-exists
# and getting created with this function call
obj_created = False
local_obj = cls(connector, **kwargs)
if check_if_exists:
if local_obj.fetch(only_ref=True):
LOG.info(("Infoblox %(obj_type)s already exists: "
"%(ib_obj)s"),
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
if not update_if_exists:
return local_obj, obj_created
reply = None
if not local_obj.ref:
reply = connector.create_object(local_obj.infoblox_type,
local_obj.to_dict(),
local_obj.return_fields)
obj_created = True
LOG.info("Infoblox %(obj_type)s was created: %(ib_obj)s",
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
elif update_if_exists:
update_fields = local_obj.to_dict(search_fields='exclude')
reply = connector.update_object(local_obj.ref,
update_fields,
local_obj.return_fields)
LOG.info('Infoblox object was updated: %s', local_obj.ref)
return cls._object_from_reply(local_obj, connector, reply), obj_created
@classmethod
def create(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
ib_object, _ = (
cls.create_check_exists(connector,
check_if_exists=check_if_exists,
update_if_exists=update_if_exists,
**kwargs))
return ib_object
@classmethod
def _search(cls, connector, return_fields=None,
search_extattrs=None, force_proxy=False,
max_results=None, **kwargs):
ib_obj_for_search = cls(connector, **kwargs)
search_dict = ib_obj_for_search.to_dict(search_fields='all')
if return_fields is None and ib_obj_for_search.return_fields:
return_fields = ib_obj_for_search.return_fields
# allow search_extattrs to be instance of EA class
# or dict in NIOS format
extattrs = search_extattrs
if hasattr(search_extattrs, 'to_dict'):
extattrs = search_extattrs.to_dict()
reply = connector.get_object(ib_obj_for_search.infoblox_type,
search_dict,
return_fields=return_fields,
extattrs=extattrs,
force_proxy=force_proxy,
max_results=max_results)
return reply, ib_obj_for_search
@classmethod
def search(cls, connector, **kwargs):
ib_obj, parse_class = cls._search(
connector, **kwargs)
if ib_obj:
return parse_class.from_dict(connector, ib_obj[0])
@classmethod
def search_all(cls, connector, **kwargs):
ib_objects, parsing_class = cls._search(
connector, **kwargs)
if ib_objects:
return [parsing_class.from_dict(connector, obj)
for obj in ib_objects]
return []
def fetch(self, only_ref=False):
"""Fetch object from NIOS by _ref or searchfields
Update existent object with fields returned from NIOS
Return True on successful object fetch
"""
if self.ref:
reply = self.connector.get_object(
self.ref, return_fields=self.return_fields)
if reply:
self.update_from_dict(reply)
return True
search_dict = self.to_dict(search_fields='update')
return_fields = [] if only_ref else self.return_fields
reply = self.connector.get_object(self.infoblox_type,
search_dict,
return_fields=return_fields)
if reply:
self.update_from_dict(reply[0], only_ref=only_ref)
return True
return False
def update(self):
update_fields = self.to_dict(search_fields='exclude')
ib_obj = self.connector.update_object(self.ref,
update_fields,
self.return_fields)
LOG.info('Infoblox object was updated: %s', self.ref)
return self._object_from_reply(self, self.connector, ib_obj)
def delete(self):
try:
self.connector.delete_object(self.ref)
except ib_ex.InfobloxCannotDeleteObject as e:
LOG.info("Failed to delete an object: %s", e)
@property
def infoblox_type(self):
return self._infoblox_type
@property
def return_fields(self):
return self._return_fields
@property
def ip_version(self):
return self._ip_version
@classmethod
def get_class_from_args(cls, kwargs):
# skip processing if cls already versioned class
if cls._ip_version:
return cls
for field in ['ip', 'cidr', 'start_ip', 'ip_address', 'network',
'start_addr', 'end_addr']:
if field in kwargs:
if ib_utils.determine_ip_version(kwargs[field]) == 6:
return cls.get_v6_class()
else:
return cls.get_v4_class()
# fallback to IPv4 object if find nothing
return cls.get_v4_class()
@classmethod
def get_v4_class(cls):
return cls
@classmethod
def get_v6_class(cls):
return cls
|
infobloxopen/infoblox-client | infoblox_client/objects.py | InfobloxObject.fetch | python | def fetch(self, only_ref=False):
if self.ref:
reply = self.connector.get_object(
self.ref, return_fields=self.return_fields)
if reply:
self.update_from_dict(reply)
return True
search_dict = self.to_dict(search_fields='update')
return_fields = [] if only_ref else self.return_fields
reply = self.connector.get_object(self.infoblox_type,
search_dict,
return_fields=return_fields)
if reply:
self.update_from_dict(reply[0], only_ref=only_ref)
return True
return False | Fetch object from NIOS by _ref or searchfields
Update existent object with fields returned from NIOS
Return True on successful object fetch | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L378-L399 | [
"def update_from_dict(self, ip_dict, only_ref=False):\n if only_ref:\n self._ref = ip_dict['_ref']\n return\n\n mapped_args = self._remap_fields(ip_dict)\n for field in self._fields + self._shadow_fields:\n if field in ip_dict:\n setattr(self, field, mapped_args[field])\n",
... | class InfobloxObject(BaseObject):
"""Base class for all Infoblox related objects
_fields - fields that represents NIOS object (WAPI fields) and
are sent to NIOS on object creation
_search_for_update_fields - field/fields used to find an object during an
update operation. this should be the smallest number of fields that
uniquely identify an object
_all_searchable_fields - all fields that can be used to find object on NIOS
side
_updateable_search_fields - fields that can be used to find object on
NIOS side, but also can be changed, so has to be sent on update.
_shadow_fields - fields that object usually has but they should not
be sent to NIOS. These fields can be received from NIOS. Examples:
[_ref, is_default]
_return_fields - fields requested to be returned from NIOS side
if object is found/created
_infoblox_type - string representing wapi type of described object
_remap - dict that maps user faced names into internal
representation (_fields)
_custom_field_processing - dict that define rules (lambda) for building
objects from data returned by NIOS side.
Expected to be redefined in child class as needed,
_custom_field_processing has priority over _global_field_processing,
so can redefine for child class global rules
defined in _global_field_processing.
_global_field_processing - almost the same as _custom_field_processing,
but defines rules for building field on global level.
Fields defined in this dict will be processed in the same way in all
child classes. Is not expected to be redefined in child classes.
_ip_version - ip version of the object, used to mark version
specific classes. Value other than None indicates that
no versioned class lookup needed.
"""
_fields = []
_search_for_update_fields = []
_all_searchable_fields = []
_updateable_search_fields = []
_shadow_fields = []
_infoblox_type = None
_remap = {}
_return_fields = []
_custom_field_processing = {}
_global_field_processing = {'extattrs': EA.from_dict}
_ip_version = None
def __new__(cls, connector, **kwargs):
return super(InfobloxObject,
cls).__new__(cls.get_class_from_args(kwargs))
def __init__(self, connector, **kwargs):
self.connector = connector
super(InfobloxObject, self).__init__(**kwargs)
def update_from_dict(self, ip_dict, only_ref=False):
if only_ref:
self._ref = ip_dict['_ref']
return
mapped_args = self._remap_fields(ip_dict)
for field in self._fields + self._shadow_fields:
if field in ip_dict:
setattr(self, field, mapped_args[field])
@classmethod
def from_dict(cls, connector, ip_dict):
"""Build dict fields as SubObjects if needed.
Checks if lambda for building object from dict exists.
_global_field_processing and _custom_field_processing rules
are checked.
"""
mapping = cls._global_field_processing.copy()
mapping.update(cls._custom_field_processing)
# Process fields that require building themselves as objects
for field in mapping:
if field in ip_dict:
ip_dict[field] = mapping[field](ip_dict[field])
return cls(connector, **ip_dict)
@staticmethod
def value_to_dict(value):
return value.to_dict() if hasattr(value, 'to_dict') else value
def field_to_dict(self, field):
"""Read field value and converts to dict if possible"""
value = getattr(self, field)
if isinstance(value, (list, tuple)):
return [self.value_to_dict(val) for val in value]
return self.value_to_dict(value)
def to_dict(self, search_fields=None):
"""Builds dict without None object fields"""
fields = self._fields
if search_fields == 'update':
fields = self._search_for_update_fields
elif search_fields == 'all':
fields = self._all_searchable_fields
elif search_fields == 'exclude':
# exclude search fields for update actions,
# but include updateable_search_fields
fields = [field for field in self._fields
if field in self._updateable_search_fields or
field not in self._search_for_update_fields]
return {field: self.field_to_dict(field) for field in fields
if getattr(self, field, None) is not None}
@staticmethod
def _object_from_reply(parse_class, connector, reply):
if not reply:
return None
if isinstance(reply, dict):
return parse_class.from_dict(connector, reply)
# If no return fields were requested reply contains only string
# with reference to object
return_dict = {'_ref': reply}
return parse_class.from_dict(connector, return_dict)
@classmethod
def create_check_exists(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
# obj_created is used to check if object is being created or
# pre-exists. obj_created is True if object is not pre-exists
# and getting created with this function call
obj_created = False
local_obj = cls(connector, **kwargs)
if check_if_exists:
if local_obj.fetch(only_ref=True):
LOG.info(("Infoblox %(obj_type)s already exists: "
"%(ib_obj)s"),
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
if not update_if_exists:
return local_obj, obj_created
reply = None
if not local_obj.ref:
reply = connector.create_object(local_obj.infoblox_type,
local_obj.to_dict(),
local_obj.return_fields)
obj_created = True
LOG.info("Infoblox %(obj_type)s was created: %(ib_obj)s",
{'obj_type': local_obj.infoblox_type,
'ib_obj': local_obj})
elif update_if_exists:
update_fields = local_obj.to_dict(search_fields='exclude')
reply = connector.update_object(local_obj.ref,
update_fields,
local_obj.return_fields)
LOG.info('Infoblox object was updated: %s', local_obj.ref)
return cls._object_from_reply(local_obj, connector, reply), obj_created
@classmethod
def create(cls, connector, check_if_exists=True,
update_if_exists=False, **kwargs):
ib_object, _ = (
cls.create_check_exists(connector,
check_if_exists=check_if_exists,
update_if_exists=update_if_exists,
**kwargs))
return ib_object
@classmethod
def _search(cls, connector, return_fields=None,
search_extattrs=None, force_proxy=False,
max_results=None, **kwargs):
ib_obj_for_search = cls(connector, **kwargs)
search_dict = ib_obj_for_search.to_dict(search_fields='all')
if return_fields is None and ib_obj_for_search.return_fields:
return_fields = ib_obj_for_search.return_fields
# allow search_extattrs to be instance of EA class
# or dict in NIOS format
extattrs = search_extattrs
if hasattr(search_extattrs, 'to_dict'):
extattrs = search_extattrs.to_dict()
reply = connector.get_object(ib_obj_for_search.infoblox_type,
search_dict,
return_fields=return_fields,
extattrs=extattrs,
force_proxy=force_proxy,
max_results=max_results)
return reply, ib_obj_for_search
@classmethod
def search(cls, connector, **kwargs):
ib_obj, parse_class = cls._search(
connector, **kwargs)
if ib_obj:
return parse_class.from_dict(connector, ib_obj[0])
@classmethod
def search_all(cls, connector, **kwargs):
ib_objects, parsing_class = cls._search(
connector, **kwargs)
if ib_objects:
return [parsing_class.from_dict(connector, obj)
for obj in ib_objects]
return []
def update(self):
update_fields = self.to_dict(search_fields='exclude')
ib_obj = self.connector.update_object(self.ref,
update_fields,
self.return_fields)
LOG.info('Infoblox object was updated: %s', self.ref)
return self._object_from_reply(self, self.connector, ib_obj)
def delete(self):
try:
self.connector.delete_object(self.ref)
except ib_ex.InfobloxCannotDeleteObject as e:
LOG.info("Failed to delete an object: %s", e)
@property
def infoblox_type(self):
return self._infoblox_type
@property
def return_fields(self):
return self._return_fields
@property
def ip_version(self):
return self._ip_version
@classmethod
def get_class_from_args(cls, kwargs):
# skip processing if cls already versioned class
if cls._ip_version:
return cls
for field in ['ip', 'cidr', 'start_ip', 'ip_address', 'network',
'start_addr', 'end_addr']:
if field in kwargs:
if ib_utils.determine_ip_version(kwargs[field]) == 6:
return cls.get_v6_class()
else:
return cls.get_v4_class()
# fallback to IPv4 object if find nothing
return cls.get_v4_class()
@classmethod
def get_v4_class(cls):
return cls
@classmethod
def get_v6_class(cls):
return cls
|
infobloxopen/infoblox-client | infoblox_client/objects.py | HostRecord._ip_setter | python | def _ip_setter(self, ipaddr_name, ipaddrs_name, ips):
if isinstance(ips, six.string_types):
setattr(self, ipaddr_name, ips)
elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP):
setattr(self, ipaddr_name, ips[0].ip)
setattr(self, ipaddrs_name, ips)
elif isinstance(ips, IP):
setattr(self, ipaddr_name, ips.ip)
setattr(self, ipaddrs_name, [ips])
elif ips is None:
setattr(self, ipaddr_name, None)
setattr(self, ipaddrs_name, None)
else:
raise ValueError(
"Invalid format of ip passed in: %s."
"Should be string or list of NIOS IP objects." % ips) | Setter for ip fields
Accept as input string or list of IP instances.
String case:
only ipvXaddr is going to be filled, that is enough to perform
host record search using ip
List of IP instances case:
ipvXaddrs is going to be filled with ips content,
so create can be issues, since fully prepared IP objects in place.
ipXaddr is also filled to be able perform search on NIOS
and verify that no such host record exists yet. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L527-L554 | null | class HostRecord(InfobloxObject):
"""Base class for HostRecords
HostRecord uses ipvXaddr for search and ipvXaddrs for object creation.
ipvXaddr and ipvXaddrs are quite different:
ipvXaddr is single ip as a string
ipvXaddrs is list of dicts with ipvXaddr, mac, configure_for_dhcp
and host keys.
In 'ipvXaddr' 'X' stands for 4 or 6 depending on ip version of the class.
To find HostRecord use next syntax:
hr = HostRecord.search(connector, ip='192.168.1.25', view='some-view')
To create host record create IP object first:
ip = IP(ip='192.168.1.25', mac='aa:ab;ce:12:23:34')
hr = HostRecord.create(connector, ip=ip, view='some-view')
"""
_infoblox_type = 'record:host'
@classmethod
def get_v4_class(cls):
return HostRecordV4
@classmethod
def get_v6_class(cls):
return HostRecordV6
|
infobloxopen/infoblox-client | infoblox_client/objects.py | FixedAddressV6.mac | python | def mac(self, mac):
self._mac = mac
if mac:
self.duid = ib_utils.generate_duid(mac)
elif not hasattr(self, 'duid'):
self.duid = None | Set mac and duid fields
To have common interface with FixedAddress accept mac address
and set duid as a side effect.
'mac' was added to _shadow_fields to prevent sending it out over wapi. | train | https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L821-L832 | [
"def generate_duid(mac):\n \"\"\"DUID is consisted of 10 hex numbers.\n\n 0x00 + mac with last 3 hex + mac with 6 hex\n \"\"\"\n valid = mac and isinstance(mac, six.string_types)\n if not valid:\n raise ValueError(\"Invalid argument was passed\")\n return \"00:\" + mac[9:] + \":\" + mac\n"
... | class FixedAddressV6(FixedAddress):
"""FixedAddress for IPv6"""
_infoblox_type = 'ipv6fixedaddress'
_fields = ['ipv6addr', 'duid', 'network_view', 'extattrs', 'network',
'comment']
_search_for_update_fields = ['ipv6addr', 'duid', 'network_view', 'network']
_all_searchable_fields = _search_for_update_fields
_return_fields = ['ipv6addr', 'duid', 'network_view', 'extattrs']
_shadow_fields = ['_ref', 'mac', 'ip']
_remap = {'ipv6addr': 'ip'}
_ip_version = 6
@property
def mac(self):
return self._mac
@mac.setter
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.open | python | def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0] | Opens a database file | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L105-L121 | [
"def close(self):\n if hasattr(self, '_f'):\n # If there is file close it.\n self._f.close()\n del self._f\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_country_short | python | def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short | Get country_short | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L129-L132 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_country_long | python | def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long | Get country_long | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L133-L136 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_region | python | def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region | Get region | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L137-L140 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_city | python | def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city | Get city | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L141-L144 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_isp | python | def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp | Get isp | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L145-L148 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_latitude | python | def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude | Get latitude | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L149-L152 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
    ''' Creates a database object and opens a file if filename is given
    '''
    # Opening is optional so callers may construct first and call
    # open() later; an empty/None filename is simply ignored.
    if filename:
        self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
    ''' Opens a database file '''
    # Ensure old file is closed before opening a new one
    self.close()
    self._f = open(filename, 'rb')
    # BIN header layout: five single-byte fields followed by six
    # little-endian 32-bit counts/offsets.  These reads advance the file
    # position, so their order must not change.
    self._dbtype = struct.unpack('B', self._f.read(1))[0]
    self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
    self._dbyear = struct.unpack('B', self._f.read(1))[0]
    self._dbmonth = struct.unpack('B', self._f.read(1))[0]
    self._dbday = struct.unpack('B', self._f.read(1))[0]
    self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
    self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
    self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
    self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
    self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
    self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
    ''' Get the whole record with all fields read from the file

    Arguments:

    addr: IPv4 or IPv6 address as a string

    Returns IP2LocationRecord or None if address not found in file
    '''
    # Thin wrapper; the binary-search lookup lives in _get_record.
    return self._get_record(addr)
def find(self, addr):
    ''' Get the whole record with all fields read from the file

    Arguments:

    addr: IPv4 or IPv6 address as a string

    Returns IP2LocationRecord or None if address not found in file
    '''
    # Behaves identically to get_all(); kept as an alternative entry point.
    return self._get_record(addr)
def _reads(self, offset):
    # Read a length-prefixed string: one byte of length at (offset - 1)
    # followed by that many bytes of text.  The consistent `- 1` across
    # the _read* helpers indicates the file format uses 1-based offsets.
    self._f.seek(offset - 1)
    n = struct.unpack('B', self._f.read(1))[0]
    # Decode the file's single-byte charset and hand back UTF-8 bytes.
    return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
    # Decode table row *mid* into an IP2LocationRecord.  Which fields a
    # database edition carries depends on self._dbtype: an entry of 0 in
    # a field's _*_POSITION table means "absent from this edition" and
    # the record attribute is left at its default.
    rec = IP2LocationRecord()
    if ipv == 4:
        off = 0
        baseaddr = self._ipv4dbaddr
    elif ipv == 6:
        # IPv6 rows begin with a 16-byte address, i.e. 12 bytes more
        # than the 4-byte IPv4 start already counted in _dbcolumn.
        off = 12
        baseaddr = self._ipv6dbaddr
    # NOTE(review): this stride omits `off`, unlike calc_off below and
    # _get_record's row stride -- confirm IPv6 rec.ip reads the intended
    # location.
    rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
    def calc_off(what, mid):
        # Absolute file offset of column *what* within row *mid*.
        return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
    if _COUNTRY_POSITION[self._dbtype] != 0:
        # Both country strings hang off the same stored pointer, at
        # different byte offsets.
        rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
        rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
    if _REGION_POSITION[self._dbtype] != 0:
        rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
    if _CITY_POSITION[self._dbtype] != 0:
        rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
    if _ISP_POSITION[self._dbtype] != 0:
        rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
    if _LATITUDE_POSITION[self._dbtype] != 0:
        # Coordinates are stored as 32-bit floats and rounded to six
        # decimal places.
        rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
    if _LONGITUDE_POSITION[self._dbtype] != 0:
        rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
    if _DOMAIN_POSITION[self._dbtype] != 0:
        rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
    if _ZIPCODE_POSITION[self._dbtype] != 0:
        rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
    if _TIMEZONE_POSITION[self._dbtype] != 0:
        rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
    if _NETSPEED_POSITION[self._dbtype] != 0:
        rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
    if _IDDCODE_POSITION[self._dbtype] != 0:
        rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
    if _AREACODE_POSITION[self._dbtype] != 0:
        rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
    if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
        rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
    if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
        rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
    if _MCC_POSITION[self._dbtype] != 0:
        rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
    if _MNC_POSITION[self._dbtype] != 0:
        rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
    if _MOBILEBRAND_POSITION[self._dbtype] != 0:
        rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
    if _ELEVATION_POSITION[self._dbtype] != 0:
        rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
    if _USAGETYPE_POSITION[self._dbtype] != 0:
        rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
    return rec
def __iter__(self):
    # Yield every IPv4 row, then every IPv6 row, in file order.
    # NOTE(review): both loops run while low <= high with high set to the
    # row count, so the boundary row at index == count is yielded too,
    # and the IPv6 loop executes once even when _ipv6dbcount is 0 --
    # confirm this inclusive bound is intended.
    low, high = 0, self._ipv4dbcount
    while low <= high:
        yield self._read_record(low, 4)
        low += 1
    low, high = 0, self._ipv6dbcount
    while low <= high:
        yield self._read_record(low, 6)
        low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_longitude | python | def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude | Get longitude | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L153-L156 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_domain | python | def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain | Get domain | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L157-L160 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_zipcode | python | def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode | Get zipcode | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L161-L164 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
    '''Decode database row *mid* into an IP2LocationRecord.

    Only the columns present in this database type (per the
    _*_POSITION tables indexed by self._dbtype) are populated; the
    rest stay at the record defaults.
    '''
    rec = IP2LocationRecord()
    if ipv == 4:
        off = 0
        baseaddr = self._ipv4dbaddr
    elif ipv == 6:
        off = 12  # IPv6 "from" address is 16 bytes, 12 wider than a uint32
        baseaddr = self._ipv6dbaddr
    # BUGFIX: the row stride is (dbcolumn * 4 + off); the original omitted
    # `off` here, so for IPv6 databases rec.ip was read from the wrong row
    # (_get_record uses the correct stride when locating `mid`).
    rec.ip = self._readips(baseaddr + mid * (self._dbcolumn * 4 + off), ipv)

    def calc_off(what, mid):
        # 1-based column index -> absolute file offset inside row `mid`.
        return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype] - 1)

    if _COUNTRY_POSITION[self._dbtype] != 0:
        # The country column points at <len><short-code>...<len><long-name>;
        # the long name starts 3 bytes past the 2-letter code (+1 length byte).
        country_ptr = self._readi(calc_off(_COUNTRY_POSITION, mid))
        rec.country_short = self._reads(country_ptr + 1)
        rec.country_long = self._reads(country_ptr + 4)
    # All plain string columns follow the same pointer-chasing pattern.
    string_fields = (
        ('region', _REGION_POSITION),
        ('city', _CITY_POSITION),
        ('isp', _ISP_POSITION),
        ('domain', _DOMAIN_POSITION),
        ('zipcode', _ZIPCODE_POSITION),
        ('timezone', _TIMEZONE_POSITION),
        ('netspeed', _NETSPEED_POSITION),
        ('idd_code', _IDDCODE_POSITION),
        ('area_code', _AREACODE_POSITION),
        ('weather_code', _WEATHERSTATIONCODE_POSITION),
        ('weather_name', _WEATHERSTATIONNAME_POSITION),
        ('mcc', _MCC_POSITION),
        ('mnc', _MNC_POSITION),
        ('mobile_brand', _MOBILEBRAND_POSITION),
        ('elevation', _ELEVATION_POSITION),
        ('usage_type', _USAGETYPE_POSITION),
    )
    for attr, table in string_fields:
        if table[self._dbtype] != 0:
            setattr(rec, attr, self._reads(self._readi(calc_off(table, mid)) + 1))
    # Coordinates are stored as 32-bit floats, rounded as in the original.
    if _LATITUDE_POSITION[self._dbtype] != 0:
        rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
    if _LONGITUDE_POSITION[self._dbtype] != 0:
        rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
    return rec
def __iter__(self):
    '''Yield decoded rows sequentially: all IPv4 rows, then all IPv6 rows.

    NOTE(review): the original loops while low <= high with high set to
    the row *count*, i.e. indices 0..count inclusive — one past the count.
    That is preserved here (the extra row looks like the sentinel "to"
    address); confirm against the BIN format spec before changing.
    '''
    for index in range(self._ipv4dbcount + 1):
        yield self._read_record(index, 4)
    for index in range(self._ipv6dbcount + 1):
        yield self._read_record(index, 6)
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
    '''Binary-search the BIN file for *ip* and decode the matching row.

    Returns an IP2LocationRecord, or None when no range contains *ip*.
    Raises ValueError for an IPv6 query against an IPv4-only file and
    OSError for an unparsable address.
    '''
    low = 0
    ipv = self._parse_addr(ip)
    if ipv == 4:
        ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
        off = 0
        baseaddr = self._ipv4dbaddr
        high = self._ipv4dbcount
        if self._ipv4indexbaseaddr > 0:
            # The /16 prefix index narrows the search to one bucket.
            indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
            low = self._readi(indexpos)
            high = self._readi(indexpos + 4)
    elif ipv == 6:
        if self._ipv6dbcount == 0:
            raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
        a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
        ipno = (a << 64) | b
        off = 12  # IPv6 "from" address is 16 bytes, 12 wider than a uint32
        baseaddr = self._ipv6dbaddr
        high = self._ipv6dbcount
        if self._ipv6indexbaseaddr > 0:
            indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
            low = self._readi(indexpos)
            high = self._readi(indexpos + 4)
    while low <= high:
        # BUGFIX: was int((low + high) / 2) — true division goes through a
        # float, which loses precision for huge offsets; use floor division.
        mid = (low + high) // 2
        rowsize = self._dbcolumn * 4 + off
        ipfrom = self._readip(baseaddr + mid * rowsize, ipv)
        ipto = self._readip(baseaddr + (mid + 1) * rowsize, ipv)
        if ipfrom <= ipno < ipto:
            return self._read_record(mid, ipv)
        if ipno < ipfrom:
            high = mid - 1
        else:
            low = mid + 1
    return None  # explicit: address not covered by any range
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_timezone | python | def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone | Get timezone | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L165-L168 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
    '''Return the country_short field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.country_short if record else record

def get_country_long(self, ip):
    '''Return the country_long field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.country_long if record else record

def get_region(self, ip):
    '''Return the region field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.region if record else record

def get_city(self, ip):
    '''Return the city field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.city if record else record

def get_isp(self, ip):
    '''Return the isp field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.isp if record else record

def get_latitude(self, ip):
    '''Return the latitude field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.latitude if record else record

def get_longitude(self, ip):
    '''Return the longitude field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.longitude if record else record

def get_domain(self, ip):
    '''Return the domain field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.domain if record else record

def get_zipcode(self, ip):
    '''Return the zipcode field for *ip* (or the falsy lookup result).'''
    record = self.get_all(ip)
    return record.zipcode if record else record
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_netspeed | python | def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed | Get netspeed | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L169-L172 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_idd_code | python | def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code | Get idd_code | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L173-L176 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_area_code | python | def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code | Get area_code | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L177-L180 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_weather_code | python | def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code | Get weather_code | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L181-L184 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_weather_name | python | def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name | Get weather_name | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L185-L188 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_mcc | python | def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc | Get mcc | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L189-L192 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_mnc | python | def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc | Get mnc | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L193-L196 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_mobile_brand | python | def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand | Get mobile_brand | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L197-L200 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_elevation | python | def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation | Get elevation | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L201-L204 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
    def _read_record(self, mid, ipv):
        ''' Build an IP2LocationRecord for database row *mid* of the
        IPv4 (ipv == 4) or IPv6 (ipv == 6) table.

        Only the columns present in this database product
        (``self._dbtype``) are populated; the record's remaining fields
        keep their defaults.  Each ``*_POSITION`` table maps dbtype to a
        column index, where 0 means the column is absent.
        '''
        rec = IP2LocationRecord()
        if ipv == 4:
            off = 0
            baseaddr = self._ipv4dbaddr
        elif ipv == 6:
            # IPv6 addresses take 16 bytes instead of 4, so each row is
            # 12 bytes wider than the column count alone suggests.
            off = 12
            baseaddr = self._ipv6dbaddr
        # NOTE(review): the row-size term here omits `off`, unlike
        # calc_off() below -- confirm this is intended for IPv6 rows.
        rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
        def calc_off(what, mid):
            # File offset of column `what` in row `mid`: rows are
            # (_dbcolumn * 4 + off) bytes long; `off` skips the wider
            # leading address column and each further column is 4 bytes.
            return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
        # String columns store a 0-based pointer to a length-prefixed
        # string; +1 converts it to the 1-based offset _reads() expects.
        if _COUNTRY_POSITION[self._dbtype] != 0:
            rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
            # The long country name sits 3 bytes past the short code,
            # behind the same pointer.
            rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
        if _REGION_POSITION[self._dbtype] != 0:
            rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
        if _CITY_POSITION[self._dbtype] != 0:
            rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
        if _ISP_POSITION[self._dbtype] != 0:
            rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
        # Latitude/longitude are stored inline as 32-bit floats, rounded
        # to 6 decimal places on read.
        if _LATITUDE_POSITION[self._dbtype] != 0:
            rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
        if _LONGITUDE_POSITION[self._dbtype] != 0:
            rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
        if _DOMAIN_POSITION[self._dbtype] != 0:
            rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
        if _ZIPCODE_POSITION[self._dbtype] != 0:
            rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
        if _TIMEZONE_POSITION[self._dbtype] != 0:
            rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
        if _NETSPEED_POSITION[self._dbtype] != 0:
            rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
        if _IDDCODE_POSITION[self._dbtype] != 0:
            rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
        if _AREACODE_POSITION[self._dbtype] != 0:
            rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
        if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
            rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
        if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
            rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
        if _MCC_POSITION[self._dbtype] != 0:
            rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
        if _MNC_POSITION[self._dbtype] != 0:
            rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
        if _MOBILEBRAND_POSITION[self._dbtype] != 0:
            rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
        if _USAGETYPE_POSITION[self._dbtype] != 0:
            rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
        return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
    def _get_record(self, ip):
        ''' Binary-search the database for *ip*.

        Returns the matching IP2LocationRecord, or None (implicitly,
        when the loop exhausts without a hit).  Raises ValueError for an
        IPv6 address against an IPv4-only file, and propagates the
        parse error for an invalid address string.
        '''
        low = 0
        ipv = self._parse_addr(ip)
        if ipv == 4:
            # Address as an unsigned 32-bit integer for range comparisons.
            ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
            off = 0
            baseaddr = self._ipv4dbaddr
            high = self._ipv4dbcount
            if self._ipv4indexbaseaddr > 0:
                # Per-/16-prefix index: 8 bytes per prefix holding the
                # (low, high) row bounds, narrowing the search window.
                indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
                low = self._readi(indexpos)
                high = self._readi(indexpos + 4)
        elif ipv == 6:
            if self._ipv6dbcount == 0:
                raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
            # Combine the two network-order 64-bit halves into one
            # 128-bit integer.
            a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
            ipno = (a << 64) | b
            off = 12
            baseaddr = self._ipv6dbaddr
            high = self._ipv6dbcount
            if self._ipv6indexbaseaddr > 0:
                # Same index trick keyed on the address's top 16 bits.
                indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
                low = self._readi(indexpos)
                high = self._readi(indexpos + 4)
        while low <= high:
            mid = int((low + high) / 2)
            # Each row begins with its range's first address; the next
            # row's first address is this range's exclusive upper bound.
            ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
            ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
            if ipfrom <= ipno < ipto:
                return self._read_record(mid, ipv)
            else:
                if ipno < ipfrom:
                    high = mid - 1
                else:
                    low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location.get_usage_type | python | def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type | Get usage_type | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L205-L208 | [
"def get_all(self, addr):\n ''' Get the whole record with all fields read from the file\n\n Arguments:\n\n addr: IPv4 or IPv6 address as a string\n\n Returns IP2LocationRecord or None if address not found in file\n '''\n return self._get_record(addr)\n"
] | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
chrislim2888/IP2Location-Python | IP2Location.py | IP2Location._parse_addr | python | def _parse_addr(self, addr):
''' Parses address and returns IP version. Raises exception on invalid argument '''
ipv = 0
try:
socket.inet_pton(socket.AF_INET6, addr)
# Convert ::FFFF:x.y.z.y to IPv4
if addr.lower().startswith('::ffff:'):
try:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
except:
ipv = 6
else:
ipv = 6
except:
socket.inet_pton(socket.AF_INET, addr)
ipv = 4
return ipv | Parses address and returns IP version. Raises exception on invalid argument | train | https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L342-L359 | null | class IP2Location(object):
''' IP2Location database '''
def __init__(self, filename=None):
''' Creates a database object and opens a file if filename is given
'''
if filename:
self.open(filename)
def __enter__(self):
if not hasattr(self, '_f') or self._f.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def open(self, filename):
''' Opens a database file '''
# Ensure old file is closed before opening a new one
self.close()
self._f = open(filename, 'rb')
self._dbtype = struct.unpack('B', self._f.read(1))[0]
self._dbcolumn = struct.unpack('B', self._f.read(1))[0]
self._dbyear = struct.unpack('B', self._f.read(1))[0]
self._dbmonth = struct.unpack('B', self._f.read(1))[0]
self._dbday = struct.unpack('B', self._f.read(1))[0]
self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0]
self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0]
def close(self):
if hasattr(self, '_f'):
# If there is file close it.
self._f.close()
del self._f
def get_country_short(self, ip):
''' Get country_short '''
rec = self.get_all(ip)
return rec and rec.country_short
def get_country_long(self, ip):
''' Get country_long '''
rec = self.get_all(ip)
return rec and rec.country_long
def get_region(self, ip):
''' Get region '''
rec = self.get_all(ip)
return rec and rec.region
def get_city(self, ip):
''' Get city '''
rec = self.get_all(ip)
return rec and rec.city
def get_isp(self, ip):
''' Get isp '''
rec = self.get_all(ip)
return rec and rec.isp
def get_latitude(self, ip):
''' Get latitude '''
rec = self.get_all(ip)
return rec and rec.latitude
def get_longitude(self, ip):
''' Get longitude '''
rec = self.get_all(ip)
return rec and rec.longitude
def get_domain(self, ip):
''' Get domain '''
rec = self.get_all(ip)
return rec and rec.domain
def get_zipcode(self, ip):
''' Get zipcode '''
rec = self.get_all(ip)
return rec and rec.zipcode
def get_timezone(self, ip):
''' Get timezone '''
rec = self.get_all(ip)
return rec and rec.timezone
def get_netspeed(self, ip):
''' Get netspeed '''
rec = self.get_all(ip)
return rec and rec.netspeed
def get_idd_code(self, ip):
''' Get idd_code '''
rec = self.get_all(ip)
return rec and rec.idd_code
def get_area_code(self, ip):
''' Get area_code '''
rec = self.get_all(ip)
return rec and rec.area_code
def get_weather_code(self, ip):
''' Get weather_code '''
rec = self.get_all(ip)
return rec and rec.weather_code
def get_weather_name(self, ip):
''' Get weather_name '''
rec = self.get_all(ip)
return rec and rec.weather_name
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc
def get_mnc(self, ip):
''' Get mnc '''
rec = self.get_all(ip)
return rec and rec.mnc
def get_mobile_brand(self, ip):
''' Get mobile_brand '''
rec = self.get_all(ip)
return rec and rec.mobile_brand
def get_elevation(self, ip):
''' Get elevation '''
rec = self.get_all(ip)
return rec and rec.elevation
def get_usage_type(self, ip):
''' Get usage_type '''
rec = self.get_all(ip)
return rec and rec.usage_type
def get_all(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def find(self, addr):
''' Get the whole record with all fields read from the file
Arguments:
addr: IPv4 or IPv6 address as a string
Returns IP2LocationRecord or None if address not found in file
'''
return self._get_record(addr)
def _reads(self, offset):
self._f.seek(offset - 1)
n = struct.unpack('B', self._f.read(1))[0]
#return u(self._f.read(n))
return self._f.read(n).decode('iso-8859-1').encode('utf-8')
def _readi(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<I', self._f.read(4))[0]
def _readf(self, offset):
self._f.seek(offset - 1)
return struct.unpack('<f', self._f.read(4))[0]
def _readip(self, offset, ipv):
if ipv == 4:
return self._readi(offset)
elif ipv == 6:
a, b, c, d = self._readi(offset), self._readi(offset + 4), self._readi(offset + 8), self._readi(offset + 12)
return (d << 96) | (c << 64) | (b << 32) | a
def _readips(self, offset, ipv):
if ipv == 4:
return socket.inet_ntoa(struct.pack('!L', self._readi(offset)))
elif ipv == 6:
return str(self._readip(offset, ipv))
def _read_record(self, mid, ipv):
rec = IP2LocationRecord()
if ipv == 4:
off = 0
baseaddr = self._ipv4dbaddr
elif ipv == 6:
off = 12
baseaddr = self._ipv6dbaddr
rec.ip = self._readips(baseaddr + (mid) * self._dbcolumn * 4, ipv)
def calc_off(what, mid):
return baseaddr + mid * (self._dbcolumn * 4 + off) + off + 4 * (what[self._dbtype]-1)
if _COUNTRY_POSITION[self._dbtype] != 0:
rec.country_short = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 1)
rec.country_long = self._reads(self._readi(calc_off(_COUNTRY_POSITION, mid)) + 4)
if _REGION_POSITION[self._dbtype] != 0:
rec.region = self._reads(self._readi(calc_off(_REGION_POSITION, mid)) + 1)
if _CITY_POSITION[self._dbtype] != 0:
rec.city = self._reads(self._readi(calc_off(_CITY_POSITION, mid)) + 1)
if _ISP_POSITION[self._dbtype] != 0:
rec.isp = self._reads(self._readi(calc_off(_ISP_POSITION, mid)) + 1)
if _LATITUDE_POSITION[self._dbtype] != 0:
rec.latitude = round(self._readf(calc_off(_LATITUDE_POSITION, mid)), 6)
if _LONGITUDE_POSITION[self._dbtype] != 0:
rec.longitude = round(self._readf(calc_off(_LONGITUDE_POSITION, mid)), 6)
if _DOMAIN_POSITION[self._dbtype] != 0:
rec.domain = self._reads(self._readi(calc_off(_DOMAIN_POSITION, mid)) + 1)
if _ZIPCODE_POSITION[self._dbtype] != 0:
rec.zipcode = self._reads(self._readi(calc_off(_ZIPCODE_POSITION, mid)) + 1)
if _TIMEZONE_POSITION[self._dbtype] != 0:
rec.timezone = self._reads(self._readi(calc_off(_TIMEZONE_POSITION, mid)) + 1)
if _NETSPEED_POSITION[self._dbtype] != 0:
rec.netspeed = self._reads(self._readi(calc_off(_NETSPEED_POSITION, mid)) + 1)
if _IDDCODE_POSITION[self._dbtype] != 0:
rec.idd_code = self._reads(self._readi(calc_off(_IDDCODE_POSITION, mid)) + 1)
if _AREACODE_POSITION[self._dbtype] != 0:
rec.area_code = self._reads(self._readi(calc_off(_AREACODE_POSITION, mid)) + 1)
if _WEATHERSTATIONCODE_POSITION[self._dbtype] != 0:
rec.weather_code = self._reads(self._readi(calc_off(_WEATHERSTATIONCODE_POSITION, mid)) + 1)
if _WEATHERSTATIONNAME_POSITION[self._dbtype] != 0:
rec.weather_name = self._reads(self._readi(calc_off(_WEATHERSTATIONNAME_POSITION, mid)) + 1)
if _MCC_POSITION[self._dbtype] != 0:
rec.mcc = self._reads(self._readi(calc_off(_MCC_POSITION, mid)) + 1)
if _MNC_POSITION[self._dbtype] != 0:
rec.mnc = self._reads(self._readi(calc_off(_MNC_POSITION, mid)) + 1)
if _MOBILEBRAND_POSITION[self._dbtype] != 0:
rec.mobile_brand = self._reads(self._readi(calc_off(_MOBILEBRAND_POSITION, mid)) + 1)
if _ELEVATION_POSITION[self._dbtype] != 0:
rec.elevation = self._reads(self._readi(calc_off(_ELEVATION_POSITION, mid)) + 1)
if _USAGETYPE_POSITION[self._dbtype] != 0:
rec.usage_type = self._reads(self._readi(calc_off(_USAGETYPE_POSITION, mid)) + 1)
return rec
def __iter__(self):
low, high = 0, self._ipv4dbcount
while low <= high:
yield self._read_record(low, 4)
low += 1
low, high = 0, self._ipv6dbcount
while low <= high:
yield self._read_record(low, 6)
low += 1
def _get_record(self, ip):
low = 0
ipv = self._parse_addr(ip)
if ipv == 4:
ipno = struct.unpack('!L', socket.inet_pton(socket.AF_INET, ip))[0]
off = 0
baseaddr = self._ipv4dbaddr
high = self._ipv4dbcount
if self._ipv4indexbaseaddr > 0:
indexpos = ((ipno >> 16) << 3) + self._ipv4indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
elif ipv == 6:
if self._ipv6dbcount == 0:
raise ValueError('Please use IPv6 BIN file for IPv6 Address.')
a, b = struct.unpack('!QQ', socket.inet_pton(socket.AF_INET6, ip))
ipno = (a << 64) | b
off = 12
baseaddr = self._ipv6dbaddr
high = self._ipv6dbcount
if self._ipv6indexbaseaddr > 0:
indexpos = ((ipno >> 112) << 3) + self._ipv6indexbaseaddr
low = self._readi(indexpos)
high = self._readi(indexpos + 4)
while low <= high:
mid = int((low + high) / 2)
ipfrom = self._readip(baseaddr + (mid) * (self._dbcolumn * 4 + off), ipv)
ipto = self._readip(baseaddr + (mid + 1) * (self._dbcolumn * 4 + off), ipv)
if ipfrom <= ipno < ipto:
return self._read_record(mid, ipv)
else:
if ipno < ipfrom:
high = mid - 1
else:
low = mid + 1
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.check_filter | python | def check_filter(filter, layer=Layer.NETWORK):
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode() | Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L102-L124 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
    def set_param(self, name, value):
        """
        Set a WinDivert parameter. See pydivert.Param for the list of parameters.

        The remapped function is DivertSetParam::

            BOOL WinDivertSetParam(
                __in HANDLE handle,
                __in WINDIVERT_PARAM param,
                __in UINT64 value
            );

        For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param

        :return: the raw result of the underlying DLL call.
        """
        return windivert_dll.WinDivertSetParam(self._handle, name, value)
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.open | python | def open(self):
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags) | Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L126-L146 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
def set_param(self, name, value):
"""
Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param
"""
return windivert_dll.WinDivertSetParam(self._handle, name, value)
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.close | python | def close(self):
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None | Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L155-L170 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
def set_param(self, name, value):
"""
Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param
"""
return windivert_dll.WinDivertSetParam(self._handle, name, value)
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.recv | python | def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
) | Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L172-L202 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
def set_param(self, name, value):
"""
Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param
"""
return windivert_dll.WinDivertSetParam(self._handle, name, value)
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.send | python | def send(self, packet, recalculate_checksum=True):
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len | Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L204-L238 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
def set_param(self, name, value):
"""
Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param
"""
return windivert_dll.WinDivertSetParam(self._handle, name, value)
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.get_param | python | def get_param(self, name):
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value | Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L240-L258 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def set_param(self, name, value):
"""
Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param
"""
return windivert_dll.WinDivertSetParam(self._handle, name, value)
|
ffalcinelli/pydivert | pydivert/windivert.py | WinDivert.set_param | python | def set_param(self, name, value):
return windivert_dll.WinDivertSetParam(self._handle, name, value) | Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert.py#L260-L274 | null | class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
An utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
:return: A tuple (res, pos, msg) with check result in 'res' human readable description of the error in 'msg' and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
|
ffalcinelli/pydivert | pydivert/windivert_dll/__init__.py | raise_on_error | python | def raise_on_error(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
retcode = GetLastError()
if retcode and retcode != ERROR_IO_PENDING:
err = WinError(code=retcode)
windll.kernel32.SetLastError(0) # clear error code so that we don't raise twice.
raise err
return result
return wrapper | This decorator throws a WinError whenever GetLastError() returns an error.
As as special case, ERROR_IO_PENDING is ignored. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert_dll/__init__.py#L41-L57 | null | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Fabio Falcinelli, Maximilian Hils
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
pydivert bundles the WinDivert binaries from
https://reqrypt.org/download/WinDivert-1.3.0-WDDK.zip
"""
import functools
import os
import platform
import sys
from ctypes import (
POINTER, GetLastError, WinError, c_uint, c_void_p, c_uint32, c_char_p, ARRAY, c_uint64, c_int16, c_int, WinDLL,
c_uint8, windll)
from ctypes.wintypes import HANDLE
from .structs import WinDivertAddress
ERROR_IO_PENDING = 997
here = os.path.abspath(os.path.dirname(__file__))
if platform.architecture()[0] == "64bit":
DLL_PATH = os.path.join(here, "WinDivert64.dll")
else:
DLL_PATH = os.path.join(here, "WinDivert32.dll")
WINDIVERT_FUNCTIONS = {
"WinDivertHelperParsePacket": [HANDLE, c_uint, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p,
c_void_p, POINTER(c_uint)],
"WinDivertHelperParseIPv4Address": [c_char_p, POINTER(c_uint32)],
"WinDivertHelperParseIPv6Address": [c_char_p, POINTER(ARRAY(c_uint8, 16))],
"WinDivertHelperCalcChecksums": [c_void_p, c_uint, c_uint64],
"WinDivertHelperCheckFilter": [c_char_p, c_int, POINTER(c_char_p), POINTER(c_uint)],
"WinDivertHelperEvalFilter": [c_char_p, c_int, c_void_p, c_uint, c_void_p],
"WinDivertOpen": [c_char_p, c_int, c_int16, c_uint64],
"WinDivertRecv": [HANDLE, c_void_p, c_uint, c_void_p, c_void_p],
"WinDivertSend": [HANDLE, c_void_p, c_uint, c_void_p, c_void_p],
"WinDivertRecvEx": [HANDLE, c_void_p, c_uint, c_uint64, c_void_p, c_void_p, c_void_p],
"WinDivertSendEx": [HANDLE, c_void_p, c_uint, c_uint64, c_void_p, c_void_p, c_void_p],
"WinDivertClose": [HANDLE],
"WinDivertGetParam": [HANDLE, c_int, POINTER(c_uint64)],
"WinDivertSetParam": [HANDLE, c_int, c_uint64],
}
_instance = None
def instance():
global _instance
if _instance is None:
_instance = WinDLL(DLL_PATH)
for funcname, argtypes in WINDIVERT_FUNCTIONS.items():
func = getattr(_instance, funcname)
func.argtypes = argtypes
return _instance
# Dark magic happens below.
# On init, windivert_dll.WinDivertOpen is a proxy function that loads the DLL on the first invocation
# and then replaces all existing proxy function with direct handles to the DLL's functions.
_module = sys.modules[__name__]
def _init():
"""
Lazy-load DLL, replace proxy functions with actual ones.
"""
i = instance()
for funcname in WINDIVERT_FUNCTIONS:
func = getattr(i, funcname)
func = raise_on_error(func)
setattr(_module, funcname, func)
def _mkprox(funcname):
"""
Make lazy-init proxy function.
"""
def prox(*args, **kwargs):
_init()
return getattr(_module, funcname)(*args, **kwargs)
return prox
for funcname in WINDIVERT_FUNCTIONS:
setattr(_module, funcname, _mkprox(funcname))
|
ffalcinelli/pydivert | pydivert/windivert_dll/__init__.py | _init | python | def _init():
i = instance()
for funcname in WINDIVERT_FUNCTIONS:
func = getattr(i, funcname)
func = raise_on_error(func)
setattr(_module, funcname, func) | Lazy-load DLL, replace proxy functions with actual ones. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert_dll/__init__.py#L99-L107 | [
"def instance():\n global _instance\n if _instance is None:\n _instance = WinDLL(DLL_PATH)\n for funcname, argtypes in WINDIVERT_FUNCTIONS.items():\n func = getattr(_instance, funcname)\n func.argtypes = argtypes\n return _instance\n",
"def raise_on_error(f):\n \"\"... | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Fabio Falcinelli, Maximilian Hils
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
pydivert bundles the WinDivert binaries from
https://reqrypt.org/download/WinDivert-1.3.0-WDDK.zip
"""
import functools
import os
import platform
import sys
from ctypes import (
POINTER, GetLastError, WinError, c_uint, c_void_p, c_uint32, c_char_p, ARRAY, c_uint64, c_int16, c_int, WinDLL,
c_uint8, windll)
from ctypes.wintypes import HANDLE
from .structs import WinDivertAddress
ERROR_IO_PENDING = 997
here = os.path.abspath(os.path.dirname(__file__))
if platform.architecture()[0] == "64bit":
DLL_PATH = os.path.join(here, "WinDivert64.dll")
else:
DLL_PATH = os.path.join(here, "WinDivert32.dll")
def raise_on_error(f):
"""
This decorator throws a WinError whenever GetLastError() returns an error.
As as special case, ERROR_IO_PENDING is ignored.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
retcode = GetLastError()
if retcode and retcode != ERROR_IO_PENDING:
err = WinError(code=retcode)
windll.kernel32.SetLastError(0) # clear error code so that we don't raise twice.
raise err
return result
return wrapper
WINDIVERT_FUNCTIONS = {
"WinDivertHelperParsePacket": [HANDLE, c_uint, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p,
c_void_p, POINTER(c_uint)],
"WinDivertHelperParseIPv4Address": [c_char_p, POINTER(c_uint32)],
"WinDivertHelperParseIPv6Address": [c_char_p, POINTER(ARRAY(c_uint8, 16))],
"WinDivertHelperCalcChecksums": [c_void_p, c_uint, c_uint64],
"WinDivertHelperCheckFilter": [c_char_p, c_int, POINTER(c_char_p), POINTER(c_uint)],
"WinDivertHelperEvalFilter": [c_char_p, c_int, c_void_p, c_uint, c_void_p],
"WinDivertOpen": [c_char_p, c_int, c_int16, c_uint64],
"WinDivertRecv": [HANDLE, c_void_p, c_uint, c_void_p, c_void_p],
"WinDivertSend": [HANDLE, c_void_p, c_uint, c_void_p, c_void_p],
"WinDivertRecvEx": [HANDLE, c_void_p, c_uint, c_uint64, c_void_p, c_void_p, c_void_p],
"WinDivertSendEx": [HANDLE, c_void_p, c_uint, c_uint64, c_void_p, c_void_p, c_void_p],
"WinDivertClose": [HANDLE],
"WinDivertGetParam": [HANDLE, c_int, POINTER(c_uint64)],
"WinDivertSetParam": [HANDLE, c_int, c_uint64],
}
_instance = None
def instance():
global _instance
if _instance is None:
_instance = WinDLL(DLL_PATH)
for funcname, argtypes in WINDIVERT_FUNCTIONS.items():
func = getattr(_instance, funcname)
func.argtypes = argtypes
return _instance
# Dark magic happens below.
# On init, windivert_dll.WinDivertOpen is a proxy function that loads the DLL on the first invocation
# and then replaces all existing proxy function with direct handles to the DLL's functions.
_module = sys.modules[__name__]
def _mkprox(funcname):
"""
Make lazy-init proxy function.
"""
def prox(*args, **kwargs):
_init()
return getattr(_module, funcname)(*args, **kwargs)
return prox
for funcname in WINDIVERT_FUNCTIONS:
setattr(_module, funcname, _mkprox(funcname))
|
ffalcinelli/pydivert | pydivert/windivert_dll/__init__.py | _mkprox | python | def _mkprox(funcname):
def prox(*args, **kwargs):
_init()
return getattr(_module, funcname)(*args, **kwargs)
return prox | Make lazy-init proxy function. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/windivert_dll/__init__.py#L110-L119 | null | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Fabio Falcinelli, Maximilian Hils
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
pydivert bundles the WinDivert binaries from
https://reqrypt.org/download/WinDivert-1.3.0-WDDK.zip
"""
import functools
import os
import platform
import sys
from ctypes import (
POINTER, GetLastError, WinError, c_uint, c_void_p, c_uint32, c_char_p, ARRAY, c_uint64, c_int16, c_int, WinDLL,
c_uint8, windll)
from ctypes.wintypes import HANDLE
from .structs import WinDivertAddress
ERROR_IO_PENDING = 997
here = os.path.abspath(os.path.dirname(__file__))
if platform.architecture()[0] == "64bit":
DLL_PATH = os.path.join(here, "WinDivert64.dll")
else:
DLL_PATH = os.path.join(here, "WinDivert32.dll")
def raise_on_error(f):
"""
This decorator throws a WinError whenever GetLastError() returns an error.
As as special case, ERROR_IO_PENDING is ignored.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
retcode = GetLastError()
if retcode and retcode != ERROR_IO_PENDING:
err = WinError(code=retcode)
windll.kernel32.SetLastError(0) # clear error code so that we don't raise twice.
raise err
return result
return wrapper
WINDIVERT_FUNCTIONS = {
"WinDivertHelperParsePacket": [HANDLE, c_uint, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p,
c_void_p, POINTER(c_uint)],
"WinDivertHelperParseIPv4Address": [c_char_p, POINTER(c_uint32)],
"WinDivertHelperParseIPv6Address": [c_char_p, POINTER(ARRAY(c_uint8, 16))],
"WinDivertHelperCalcChecksums": [c_void_p, c_uint, c_uint64],
"WinDivertHelperCheckFilter": [c_char_p, c_int, POINTER(c_char_p), POINTER(c_uint)],
"WinDivertHelperEvalFilter": [c_char_p, c_int, c_void_p, c_uint, c_void_p],
"WinDivertOpen": [c_char_p, c_int, c_int16, c_uint64],
"WinDivertRecv": [HANDLE, c_void_p, c_uint, c_void_p, c_void_p],
"WinDivertSend": [HANDLE, c_void_p, c_uint, c_void_p, c_void_p],
"WinDivertRecvEx": [HANDLE, c_void_p, c_uint, c_uint64, c_void_p, c_void_p, c_void_p],
"WinDivertSendEx": [HANDLE, c_void_p, c_uint, c_uint64, c_void_p, c_void_p, c_void_p],
"WinDivertClose": [HANDLE],
"WinDivertGetParam": [HANDLE, c_int, POINTER(c_uint64)],
"WinDivertSetParam": [HANDLE, c_int, c_uint64],
}
_instance = None
def instance():
global _instance
if _instance is None:
_instance = WinDLL(DLL_PATH)
for funcname, argtypes in WINDIVERT_FUNCTIONS.items():
func = getattr(_instance, funcname)
func.argtypes = argtypes
return _instance
# Dark magic happens below.
# On init, windivert_dll.WinDivertOpen is a proxy function that loads the DLL on the first invocation
# and then replaces all existing proxy function with direct handles to the DLL's functions.
_module = sys.modules[__name__]
def _init():
"""
Lazy-load DLL, replace proxy functions with actual ones.
"""
i = instance()
for funcname in WINDIVERT_FUNCTIONS:
func = getattr(i, funcname)
func = raise_on_error(func)
setattr(_module, funcname, func)
for funcname in WINDIVERT_FUNCTIONS:
setattr(_module, funcname, _mkprox(funcname))
|
ffalcinelli/pydivert | pydivert/packet/ip.py | IPHeader.src_addr | python | def src_addr(self):
try:
return socket.inet_ntop(self._af, self.raw[self._src_addr].tobytes())
except (ValueError, socket.error):
pass | The packet source address. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/ip.py#L29-L36 | null | class IPHeader(Header):
_src_addr = slice(0, 0)
_dst_addr = slice(0, 0)
_af = None
@property
@src_addr.setter
def src_addr(self, val):
self.raw[self._src_addr] = socket.inet_pton(self._af, val)
@property
def dst_addr(self):
"""
The packet destination address.
"""
try:
return socket.inet_ntop(self._af, self.raw[self._dst_addr].tobytes())
except (ValueError, socket.error):
pass
@dst_addr.setter
def dst_addr(self, val):
self.raw[self._dst_addr] = socket.inet_pton(self._af, val)
@property
def packet_len(self):
"""
The total packet length, including *all* headers, as reported by the IP header.
"""
raise NotImplementedError() # pragma: no cover
@packet_len.setter
def packet_len(self, val):
raise NotImplementedError() # pragma: no cover
|
ffalcinelli/pydivert | pydivert/packet/ip.py | IPHeader.dst_addr | python | def dst_addr(self):
try:
return socket.inet_ntop(self._af, self.raw[self._dst_addr].tobytes())
except (ValueError, socket.error):
pass | The packet destination address. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/ip.py#L43-L50 | null | class IPHeader(Header):
_src_addr = slice(0, 0)
_dst_addr = slice(0, 0)
_af = None
@property
def src_addr(self):
"""
The packet source address.
"""
try:
return socket.inet_ntop(self._af, self.raw[self._src_addr].tobytes())
except (ValueError, socket.error):
pass
@src_addr.setter
def src_addr(self, val):
self.raw[self._src_addr] = socket.inet_pton(self._af, val)
@property
@dst_addr.setter
def dst_addr(self, val):
self.raw[self._dst_addr] = socket.inet_pton(self._af, val)
@property
def packet_len(self):
"""
The total packet length, including *all* headers, as reported by the IP header.
"""
raise NotImplementedError() # pragma: no cover
@packet_len.setter
def packet_len(self, val):
raise NotImplementedError() # pragma: no cover
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.address_family | python | def address_family(self):
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6 | The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L88-L100 | [
"indexbyte = lambda x: x\n"
] | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.protocol | python | def protocol(self):
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start | - | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L103-L147 | [
"indexbyte = lambda x: x\n"
] | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.icmpv4 | python | def icmpv4(self):
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start) | - An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L176-L183 | null | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.icmpv6 | python | def icmpv6(self):
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start) | - An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L186-L193 | null | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.tcp | python | def tcp(self):
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start) | - An TCPHeader instance, if the packet is valid TCP.
- None, otherwise. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L204-L211 | null | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.udp | python | def udp(self):
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start) | - An TCPHeader instance, if the packet is valid UDP.
- None, otherwise. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L214-L221 | null | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet._payload | python | def _payload(self):
return self.tcp or self.udp or self.icmpv4 or self.icmpv6 | header that implements PayloadMixin | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L229-L231 | null | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.recalculate_checksums | python | def recalculate_checksums(self, flags=0):
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num | (Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L298-L311 | [
"def __to_buffers(self):\n buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj\n return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)\n"
] | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
"""
Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure.
"""
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.wd_addr | python | def wd_addr(self):
address = windivert_dll.WinDivertAddress()
address.IfIdx, address.SubIfIdx = self.interface
address.Direction = self.direction
return address | Gets the interface and direction as a `WINDIVERT_ADDRESS` structure.
:return: The `WINDIVERT_ADDRESS` structure. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L318-L326 | null | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
"""
(Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
Individual checksum calculations may be disabled via the appropriate flag.
Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
Returns the number of checksums calculated.
See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums
"""
buff, buff_ = self.__to_buffers()
num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
if PY2:
self.raw = memoryview(buff)[:len(self.raw)]
return num
def __to_buffers(self):
buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def matches(self, filter, layer=Layer.NETWORK):
"""
Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise.
"""
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr))
|
ffalcinelli/pydivert | pydivert/packet/__init__.py | Packet.matches | python | def matches(self, filter, layer=Layer.NETWORK):
buff, buff_ = self.__to_buffers()
return windivert_dll.WinDivertHelperEvalFilter(filter.encode(), layer, ctypes.byref(buff_), len(self.raw),
ctypes.byref(self.wd_addr)) | Evaluates the packet against the given packet filter string.
The remapped function is::
BOOL WinDivertHelperEvalFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_eval_filter
:param filter: The filter string.
:param layer: The network layer.
:return: True if the packet matches, and False otherwise. | train | https://github.com/ffalcinelli/pydivert/blob/f75eba4126c527b5a43ace0a49369c7479cf5ee8/pydivert/packet/__init__.py#L328-L350 | [
"def __to_buffers(self):\n buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj\n return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)\n"
] | class Packet(object):
"""
A single packet, possibly including an IP header, a TCP/UDP header and a payload.
Creation of packets is cheap, parsing is done on first attribute access.
"""
def __init__(self, raw, interface, direction):
if isinstance(raw, bytes):
raw = memoryview(bytearray(raw))
self.raw = raw # type: memoryview
self.interface = interface
self.direction = direction
def __repr__(self):
def dump(x):
if isinstance(x, Header) or isinstance(x, Packet):
d = {}
for k in dir(x):
v = getattr(x, k)
if k.startswith("_") or callable(v):
continue
if k in {"address_family", "protocol", "ip", "icmp"}:
continue
if k == "payload" and v and len(v) > 20:
v = v[:20] + b"..."
d[k] = dump(v)
if isinstance(x, Packet):
return pprint.pformat(d)
return d
return x
return "Packet({})".format(dump(self))
@property
def is_outbound(self):
"""
Indicates if the packet is outbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.OUTBOUND
@property
def is_inbound(self):
"""
Indicates if the packet is inbound.
Convenience method for ``.direction``.
"""
return self.direction == Direction.INBOUND
@property
def is_loopback(self):
"""
- True, if the packet is on the loopback interface.
- False, otherwise.
"""
return self.interface[0] == 1
@cached_property
def address_family(self):
"""
The packet address family:
- socket.AF_INET, if IPv4
- socket.AF_INET6, if IPv6
- None, otherwise.
"""
if len(self.raw) >= 20:
v = i(self.raw[0]) >> 4
if v == 4:
return socket.AF_INET
if v == 6:
return socket.AF_INET6
@cached_property
def protocol(self):
"""
- | A (ipproto, proto_start) tuple.
| ``ipproto`` is the IP protocol in use, e.g. Protocol.TCP or Protocol.UDP.
| ``proto_start`` denotes the beginning of the protocol data.
| If the packet does not match our expectations, both ipproto and proto_start are None.
"""
if self.address_family == socket.AF_INET:
proto = i(self.raw[9])
start = (i(self.raw[0]) & 0b1111) * 4
elif self.address_family == socket.AF_INET6:
proto = i(self.raw[6])
# skip over well-known ipv6 headers
start = 40
while proto in IPV6_EXT_HEADERS:
if start >= len(self.raw):
# less than two bytes left
start = None
proto = None
break
if proto == Protocol.FRAGMENT:
hdrlen = 8
elif proto == Protocol.AH:
hdrlen = (i(self.raw[start + 1]) + 2) * 4
else:
# Protocol.HOPOPT, Protocol.DSTOPTS, Protocol.ROUTING
hdrlen = (i(self.raw[start + 1]) + 1) * 8
proto = i(self.raw[start])
start += hdrlen
else:
start = None
proto = None
out_of_bounds = (
(proto == Protocol.TCP and start + 20 > len(self.raw)) or
(proto == Protocol.UDP and start + 8 > len(self.raw)) or
(proto in {Protocol.ICMP, Protocol.ICMPV6} and start + 4 > len(self.raw))
)
if out_of_bounds:
# special-case tcp/udp so that we can rely on .protocol for the port properties.
start = None
proto = None
return proto, start
@cached_property
def ipv4(self):
"""
- An IPv4Header instance, if the packet is valid IPv4.
- None, otherwise.
"""
if self.address_family == socket.AF_INET:
return IPv4Header(self)
@cached_property
def ipv6(self):
"""
- An IPv6Header instance, if the packet is valid IPv6.
- None, otherwise.
"""
if self.address_family == socket.AF_INET6:
return IPv6Header(self)
@cached_property
def ip(self):
"""
- An IPHeader instance, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
return self.ipv4 or self.ipv6
@cached_property
def icmpv4(self):
"""
- An ICMPv4Header instance, if the packet is valid ICMPv4.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMP:
return ICMPv4Header(self, proto_start)
@cached_property
def icmpv6(self):
"""
- An ICMPv6Header instance, if the packet is valid ICMPv6.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.ICMPV6:
return ICMPv6Header(self, proto_start)
@cached_property
def icmp(self):
"""
- An ICMPHeader instance, if the packet is valid ICMPv4 or ICMPv6.
- None, otherwise.
"""
return self.icmpv4 or self.icmpv6
@cached_property
def tcp(self):
"""
- An TCPHeader instance, if the packet is valid TCP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.TCP:
return TCPHeader(self, proto_start)
@cached_property
def udp(self):
"""
- An TCPHeader instance, if the packet is valid UDP.
- None, otherwise.
"""
ipproto, proto_start = self.protocol
if ipproto == Protocol.UDP:
return UDPHeader(self, proto_start)
@cached_property
def _port(self):
"""header that implements PortMixin"""
return self.tcp or self.udp
@cached_property
def _payload(self):
"""header that implements PayloadMixin"""
return self.tcp or self.udp or self.icmpv4 or self.icmpv6
@property
def src_addr(self):
"""
- The source address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.src_addr
@src_addr.setter
def src_addr(self, val):
self.ip.src_addr = val
@property
def dst_addr(self):
"""
- The destination address, if the packet is valid IPv4 or IPv6.
- None, otherwise.
"""
if self.ip:
return self.ip.dst_addr
@dst_addr.setter
def dst_addr(self, val):
self.ip.dst_addr = val
@property
def src_port(self):
"""
- The source port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.src_port
@src_port.setter
def src_port(self, val):
self._port.src_port = val
@property
def dst_port(self):
"""
- The destination port, if the packet is valid TCP or UDP.
- None, otherwise.
"""
if self._port:
return self._port.dst_port
@dst_port.setter
def dst_port(self, val):
self._port.dst_port = val
@property
def payload(self):
"""
- The payload, if the packet is valid TCP, UDP, ICMP or ICMPv6.
- None, otherwise.
"""
if self._payload:
return self._payload.payload
@payload.setter
def payload(self, val):
self._payload.payload = val
def recalculate_checksums(self, flags=0):
    """
    (Re)calculates the checksum for any IPv4/ICMP/ICMPv6/TCP/UDP checksum present in the given packet.
    Individual checksum calculations may be disabled via the appropriate flag.
    Typically this function should be invoked on a modified packet before it is injected with WinDivert.send().
    Returns the number of checksums calculated.
    See: https://reqrypt.org/windivert-doc.html#divert_helper_calc_checksums

    :param flags: bitmask passed through unchanged to the WinDivert helper;
        0 (the default) recalculates every checksum present.
    :return: the number of checksums calculated, as reported by the DLL.
    """
    # buff is a mutable byte container holding the packet bytes; buff_ is a
    # ctypes char array that aliases buff, so the DLL patches the checksum
    # fields in place (see __to_buffers).
    buff, buff_ = self.__to_buffers()
    num = windivert_dll.WinDivertHelperCalcChecksums(ctypes.byref(buff_), len(self.raw), flags)
    if PY2:
        # On Python 2, __to_buffers() works on a *copy* of the packet bytes
        # (bytearray(self.raw.tobytes())), so rebind self.raw to a view over
        # the patched copy, truncated to the original packet length.
        self.raw = memoryview(buff)[:len(self.raw)]
    return num
def __to_buffers(self):
    """Return (buff, char_array): a mutable byte container with the packet
    bytes, plus a ctypes char array created via from_buffer() that aliases
    buff — writes through the array mutate buff directly (no copy)."""
    # On PY2 a fresh bytearray copy of the raw bytes is made; on PY3 the
    # object backing the self.raw memoryview is reused directly (zero-copy).
    buff = bytearray(self.raw.tobytes()) if PY2 else self.raw.obj
    return buff, (ctypes.c_char * len(self.raw)).from_buffer(buff)
@property
def wd_addr(self):
    """
    Gets the interface and direction packed into a `WINDIVERT_ADDRESS` structure.

    :return: A freshly populated `WINDIVERT_ADDRESS` structure.
    """
    if_idx, sub_if_idx = self.interface
    addr = windivert_dll.WinDivertAddress()
    addr.IfIdx = if_idx
    addr.SubIfIdx = sub_if_idx
    addr.Direction = self.direction
    return addr
|
CLARIAH/grlc | src/projection.py | project | python | def project(dataIn, projectionScript):
'''Programs may make use of data in the `dataIn` variable and should
produce data on the `dataOut` variable.'''
# We don't really need to initialize it, but we do it to avoid linter errors
dataOut = {}
try:
projectionScript = str(projectionScript)
program = makeProgramFromString(projectionScript)
if PY3:
loc = {
'dataIn': dataIn,
'dataOut': dataOut
}
exec(program, {}, loc)
dataOut = loc['dataOut']
else:
exec(program)
except Exception as e:
glogger.error("Error while executing SPARQL projection")
glogger.error(projectionScript)
glogger.error("Encountered exception: ")
glogger.error(e)
dataOut = {
'status': 'error',
'message': e.message
}
return dataOut | Programs may make use of data in the `dataIn` variable and should
produce data on the `dataOut` variable. | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/projection.py#L7-L33 | null | import logging
from pythonql.parser.Preprocessor import makeProgramFromString
from six import PY3
glogger = logging.getLogger(__name__)
|
CLARIAH/grlc | src/prov.py | grlcPROV.init_prov_graph | python | def init_prov_graph(self):
try:
# Use git2prov to get prov on the repo
repo_prov = check_output(
['node_modules/git2prov/bin/git2prov', 'https://github.com/{}/{}/'.format(self.user, self.repo),
'PROV-O']).decode("utf-8")
repo_prov = repo_prov[repo_prov.find('@'):]
# glogger.debug('Git2PROV output: {}'.format(repo_prov))
glogger.debug('Ingesting Git2PROV output into RDF graph')
with open('temp.prov.ttl', 'w') as temp_prov:
temp_prov.write(repo_prov)
self.prov_g.parse('temp.prov.ttl', format='turtle')
except Exception as e:
glogger.error(e)
glogger.error("Couldn't parse Git2PROV graph, continuing without repo PROV")
pass
self.prov_g.add((self.agent, RDF.type, self.prov.Agent))
self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, RDF.type, self.prov.Activity))
# entity_d
self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))
self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))
# later: entity_d genereated at time (when we know the end time)
# activity
self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))
self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now()))) | Initialize PROV graph with all we know at the start of the recording | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/prov.py#L35-L68 | null | class grlcPROV():
def __init__(self, user, repo):
    """
    Create a provenance recorder for the given GitHub user/repo pair and
    initialize the PROV graph with the agent, spec entity and activity.
    """
    self.user = user
    self.repo = repo
    self.prov_g = Graph()
    prov_uri = URIRef("http://www.w3.org/ns/prov#")
    self.prov = Namespace(prov_uri)
    self.prov_g.bind('prov', self.prov)
    server = static.SERVER_NAME
    self.agent = URIRef("http://{}".format(server))
    self.entity_d = URIRef("http://{}/api/{}/{}/spec".format(server, self.user, self.repo))
    self.activity = URIRef(self.entity_d + "-activity")
    self.init_prov_graph()
    # later: activity used entity_o_1 ... entity_o_n
    # later: activity endedAtTime (when we know the end time)
def add_used_entity(self, entity_uri):
    """
    Record `entity_uri` as a prov:Entity used by the logged activity.
    """
    used_entity = URIRef(entity_uri)
    self.prov_g.add((used_entity, RDF.type, self.prov.Entity))
    self.prov_g.add((self.activity, self.prov.used, used_entity))
def end_prov_graph(self):
    """
    Finalize the recording: stamp both the entity's generation time and the
    activity's end time with the current time.
    """
    finished_at = Literal(datetime.now())
    self.prov_g.add((self.entity_d, self.prov.generatedAtTime, finished_at))
    self.prov_g.add((self.activity, self.prov.endedAtTime, finished_at))
def log_prov_graph(self):
    """
    Write the provenance graph collected so far to the debug log, as Turtle.
    """
    glogger.debug("Spec generation provenance graph:")
    turtle = self.prov_g.serialize(format='turtle')
    glogger.debug(turtle)
def serialize(self, format):
    """
    Serialize the provenance graph in the requested format; on Python 3 the
    result is decoded from UTF-8, on Python 2 it is returned as-is.
    """
    serialized = self.prov_g.serialize(format=format)
    if PY3:
        return serialized.decode('utf-8')
    return serialized
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.