repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
constverum/ProxyBroker
|
proxybroker/proxy.py
|
Proxy.avg_resp_time
|
python
|
def avg_resp_time(self):
if not self._runtimes:
return 0
return round(sum(self._runtimes) / len(self._runtimes), 2)
|
The average connection/response time.
:rtype: float
|
train
|
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/proxy.py#L188-L195
| null |
class Proxy:
"""Proxy.
:param str host: IP address of the proxy
:param int port: Port of the proxy
:param tuple types:
(optional) List of types (protocols) which may be supported
by the proxy and which can be checked to work with the proxy
:param int timeout:
(optional) Timeout of a connection and receive a response in seconds
:param bool verify_ssl:
(optional) Flag indicating whether to check the SSL certificates.
Set to True to check ssl certifications
:raises ValueError: If the host not is IP address, or if the port > 65535
"""
@classmethod
async def create(cls, host, *args, **kwargs):
"""Asynchronously create a :class:`Proxy` object.
:param str host: A passed host can be a domain or IP address.
If the host is a domain, try to resolve it
:param str \*args:
(optional) Positional arguments that :class:`Proxy` takes
:param str \*\*kwargs:
(optional) Keyword arguments that :class:`Proxy` takes
:return: :class:`Proxy` object
:rtype: proxybroker.Proxy
:raises ResolveError: If could not resolve the host
:raises ValueError: If the port > 65535
""" # noqa: W605
loop = kwargs.pop('loop', None)
resolver = kwargs.pop('resolver', Resolver(loop=loop))
try:
_host = await resolver.resolve(host)
self = cls(_host, *args, **kwargs)
except (ResolveError, ValueError) as e:
log.error('%s:%s: Error at creating: %s' % (host, args[0], e))
raise
return self
def __init__(
self, host=None, port=None, types=(), timeout=8, verify_ssl=False
):
self.host = host
if not Resolver.host_is_ip(self.host):
raise ValueError(
'The host of proxy should be the IP address. '
'Try Proxy.create() if the host is a domain'
)
self.port = int(port)
if self.port > 65535:
raise ValueError('The port of proxy cannot be greater than 65535')
self.expected_types = set(types) & {
'HTTP',
'HTTPS',
'CONNECT:80',
'CONNECT:25',
'SOCKS4',
'SOCKS5',
}
self._timeout = timeout
self._ssl_context = (
True if verify_ssl else _ssl._create_unverified_context()
)
self._types = {}
self._is_working = False
self.stat = {'requests': 0, 'errors': Counter()}
self._ngtr = None
self._geo = Resolver.get_ip_info(self.host)
self._log = []
self._runtimes = []
self._schemes = ()
self._closed = True
self._reader = {'conn': None, 'ssl': None}
self._writer = {'conn': None, 'ssl': None}
def __repr__(self):
# <Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>
tpinfo = []
order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731
for tp, lvl in sorted(self.types.items(), key=order):
s = '{tp}: {lvl}' if lvl else '{tp}'
s = s.format(tp=tp, lvl=lvl)
tpinfo.append(s)
tpinfo = ', '.join(tpinfo)
return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(
code=self._geo.code,
types=tpinfo,
host=self.host,
port=self.port,
avg=self.avg_resp_time,
)
@property
def types(self):
"""Types (protocols) supported by the proxy.
| Where key is type, value is level of anonymity
(only for HTTP, for other types level always is None).
| Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25
| Available levels: Transparent, Anonymous, High.
:rtype: dict
"""
return self._types
@property
def is_working(self):
"""True if the proxy is working, False otherwise.
:rtype: bool
"""
return self._is_working
@is_working.setter
def is_working(self, val):
self._is_working = val
@property
def writer(self):
return self._writer.get('ssl') or self._writer.get('conn')
@property
def reader(self):
return self._reader.get('ssl') or self._reader.get('conn')
@property
def priority(self):
return (self.error_rate, self.avg_resp_time)
@property
def error_rate(self):
"""Error rate: from 0 to 1.
For example: 0.7 = 70% requests ends with error.
:rtype: float
.. versionadded:: 0.2.0
"""
if not self.stat['requests']:
return 0
return round(
sum(self.stat['errors'].values()) / self.stat['requests'], 2
)
@property
def schemes(self):
"""Return supported schemes."""
if not self._schemes:
_schemes = []
if self.types.keys() & _HTTP_PROTOS:
_schemes.append('HTTP')
if self.types.keys() & _HTTPS_PROTOS:
_schemes.append('HTTPS')
self._schemes = tuple(_schemes)
return self._schemes
@property
@property
def avgRespTime(self):
"""
.. deprecated:: 2.0
Use :attr:`avg_resp_time` instead.
"""
warnings.warn(
'`avgRespTime` property is deprecated, '
'use `avg_resp_time` instead.',
DeprecationWarning,
)
return self.avg_resp_time
@property
def geo(self):
"""Geo information about IP address of the proxy.
:return:
Named tuple with fields:
* ``code`` - ISO country code
* ``name`` - Full name of country
* ``region_code`` - ISO region code
* ``region_name`` - Full name of region
* ``city_name`` - Full name of city
:rtype: collections.namedtuple
.. versionchanged:: 0.2.0
In previous versions return a dictionary, now named tuple.
"""
return self._geo
@property
def ngtr(self):
return self._ngtr
@ngtr.setter
def ngtr(self, proto):
self._ngtr = NGTRS[proto](self)
def as_json(self):
"""Return the proxy's properties in JSON format.
:rtype: dict
"""
info = {
'host': self.host,
'port': self.port,
'geo': {
'country': {'code': self._geo.code, 'name': self._geo.name},
'region': {
'code': self._geo.region_code,
'name': self._geo.region_name,
},
'city': self._geo.city_name,
},
'types': [],
'avg_resp_time': self.avg_resp_time,
'error_rate': self.error_rate,
}
order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731
for tp, lvl in sorted(self.types.items(), key=order):
info['types'].append({'type': tp, 'level': lvl or ''})
return info
def log(self, msg, stime=0, err=None):
ngtr = self.ngtr.name if self.ngtr else 'INFO'
runtime = time.time() - stime if stime else 0
log.debug(
'{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(
h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime
)
)
trunc = '...' if len(msg) > 58 else ''
msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)
self._log.append((ngtr, msg, runtime))
if err:
self.stat['errors'][err.errmsg] += 1
if runtime and 'timeout' not in msg:
self._runtimes.append(runtime)
def get_log(self):
"""Proxy log.
:return: The proxy log in format: (negotaitor, msg, runtime)
:rtype: tuple
.. versionadded:: 0.2.0
"""
return self._log
async def connect(self, ssl=False):
err = None
msg = '%s' % 'SSL: ' if ssl else ''
stime = time.time()
self.log('%sInitial connection' % msg)
try:
if ssl:
_type = 'ssl'
sock = self._writer['conn'].get_extra_info('socket')
params = {
'ssl': self._ssl_context,
'sock': sock,
'server_hostname': self.host,
}
else:
_type = 'conn'
params = {'host': self.host, 'port': self.port}
self._reader[_type], self._writer[_type] = await asyncio.wait_for(
asyncio.open_connection(**params), timeout=self._timeout
)
except asyncio.TimeoutError:
msg += 'Connection: timeout'
err = ProxyTimeoutError(msg)
raise err
except (ConnectionRefusedError, OSError, _ssl.SSLError):
msg += 'Connection: failed'
err = ProxyConnError(msg)
raise err
# except asyncio.CancelledError:
# log.debug('Cancelled in proxy.connect()')
# raise ProxyConnError()
else:
msg += 'Connection: success'
self._closed = False
finally:
self.stat['requests'] += 1
self.log(msg, stime, err=err)
def close(self):
if self._closed:
return
self._closed = True
if self.writer:
# try:
self.writer.close()
# except RuntimeError:
# print('Try proxy.close() when loop is closed:',
# asyncio.get_event_loop()._closed)
self._reader = {'conn': None, 'ssl': None}
self._writer = {'conn': None, 'ssl': None}
self.log('Connection: closed')
self._ngtr = None
async def send(self, req):
msg, err = '', None
_req = req.encode() if not isinstance(req, bytes) else req
try:
self.writer.write(_req)
await self.writer.drain()
except ConnectionResetError:
msg = '; Sending: failed'
err = ProxySendError(msg)
raise err
finally:
self.log('Request: %s%s' % (req, msg), err=err)
async def recv(self, length=0, head_only=False):
resp, msg, err = b'', '', None
stime = time.time()
try:
resp = await asyncio.wait_for(
self._recv(length, head_only), timeout=self._timeout
)
except asyncio.TimeoutError:
msg = 'Received: timeout'
err = ProxyTimeoutError(msg)
raise err
except (ConnectionResetError, OSError):
msg = 'Received: failed' # (connection is reset by the peer)
err = ProxyRecvError(msg)
raise err
else:
msg = 'Received: %s bytes' % len(resp)
if not resp:
err = ProxyEmptyRecvError(msg)
raise err
finally:
if resp:
msg += ': %s' % resp[:12]
self.log(msg, stime, err=err)
return resp
async def _recv(self, length=0, head_only=False):
resp = b''
if length:
try:
resp = await self.reader.readexactly(length)
except asyncio.IncompleteReadError as e:
resp = e.partial
else:
body_size, body_recv, chunked = 0, 0, None
while not self.reader.at_eof():
line = await self.reader.readline()
resp += line
if body_size:
body_recv += len(line)
if body_recv >= body_size:
break
elif chunked and line == b'0\r\n':
break
elif not body_size and line == b'\r\n':
if head_only:
break
headers = parse_headers(resp)
body_size = int(headers.get('Content-Length', 0))
if not body_size:
chunked = headers.get('Transfer-Encoding') == 'chunked'
return resp
|
constverum/ProxyBroker
|
proxybroker/proxy.py
|
Proxy.as_json
|
python
|
def as_json(self):
info = {
'host': self.host,
'port': self.port,
'geo': {
'country': {'code': self._geo.code, 'name': self._geo.name},
'region': {
'code': self._geo.region_code,
'name': self._geo.region_name,
},
'city': self._geo.city_name,
},
'types': [],
'avg_resp_time': self.avg_resp_time,
'error_rate': self.error_rate,
}
order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731
for tp, lvl in sorted(self.types.items(), key=order):
info['types'].append({'type': tp, 'level': lvl or ''})
return info
|
Return the proxy's properties in JSON format.
:rtype: dict
|
train
|
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/proxy.py#L236-L260
| null |
class Proxy:
"""Proxy.
:param str host: IP address of the proxy
:param int port: Port of the proxy
:param tuple types:
(optional) List of types (protocols) which may be supported
by the proxy and which can be checked to work with the proxy
:param int timeout:
(optional) Timeout of a connection and receive a response in seconds
:param bool verify_ssl:
(optional) Flag indicating whether to check the SSL certificates.
Set to True to check ssl certifications
:raises ValueError: If the host not is IP address, or if the port > 65535
"""
@classmethod
async def create(cls, host, *args, **kwargs):
"""Asynchronously create a :class:`Proxy` object.
:param str host: A passed host can be a domain or IP address.
If the host is a domain, try to resolve it
:param str \*args:
(optional) Positional arguments that :class:`Proxy` takes
:param str \*\*kwargs:
(optional) Keyword arguments that :class:`Proxy` takes
:return: :class:`Proxy` object
:rtype: proxybroker.Proxy
:raises ResolveError: If could not resolve the host
:raises ValueError: If the port > 65535
""" # noqa: W605
loop = kwargs.pop('loop', None)
resolver = kwargs.pop('resolver', Resolver(loop=loop))
try:
_host = await resolver.resolve(host)
self = cls(_host, *args, **kwargs)
except (ResolveError, ValueError) as e:
log.error('%s:%s: Error at creating: %s' % (host, args[0], e))
raise
return self
def __init__(
self, host=None, port=None, types=(), timeout=8, verify_ssl=False
):
self.host = host
if not Resolver.host_is_ip(self.host):
raise ValueError(
'The host of proxy should be the IP address. '
'Try Proxy.create() if the host is a domain'
)
self.port = int(port)
if self.port > 65535:
raise ValueError('The port of proxy cannot be greater than 65535')
self.expected_types = set(types) & {
'HTTP',
'HTTPS',
'CONNECT:80',
'CONNECT:25',
'SOCKS4',
'SOCKS5',
}
self._timeout = timeout
self._ssl_context = (
True if verify_ssl else _ssl._create_unverified_context()
)
self._types = {}
self._is_working = False
self.stat = {'requests': 0, 'errors': Counter()}
self._ngtr = None
self._geo = Resolver.get_ip_info(self.host)
self._log = []
self._runtimes = []
self._schemes = ()
self._closed = True
self._reader = {'conn': None, 'ssl': None}
self._writer = {'conn': None, 'ssl': None}
def __repr__(self):
# <Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>
tpinfo = []
order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731
for tp, lvl in sorted(self.types.items(), key=order):
s = '{tp}: {lvl}' if lvl else '{tp}'
s = s.format(tp=tp, lvl=lvl)
tpinfo.append(s)
tpinfo = ', '.join(tpinfo)
return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(
code=self._geo.code,
types=tpinfo,
host=self.host,
port=self.port,
avg=self.avg_resp_time,
)
@property
def types(self):
"""Types (protocols) supported by the proxy.
| Where key is type, value is level of anonymity
(only for HTTP, for other types level always is None).
| Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25
| Available levels: Transparent, Anonymous, High.
:rtype: dict
"""
return self._types
@property
def is_working(self):
"""True if the proxy is working, False otherwise.
:rtype: bool
"""
return self._is_working
@is_working.setter
def is_working(self, val):
self._is_working = val
@property
def writer(self):
return self._writer.get('ssl') or self._writer.get('conn')
@property
def reader(self):
return self._reader.get('ssl') or self._reader.get('conn')
@property
def priority(self):
return (self.error_rate, self.avg_resp_time)
@property
def error_rate(self):
"""Error rate: from 0 to 1.
For example: 0.7 = 70% requests ends with error.
:rtype: float
.. versionadded:: 0.2.0
"""
if not self.stat['requests']:
return 0
return round(
sum(self.stat['errors'].values()) / self.stat['requests'], 2
)
@property
def schemes(self):
"""Return supported schemes."""
if not self._schemes:
_schemes = []
if self.types.keys() & _HTTP_PROTOS:
_schemes.append('HTTP')
if self.types.keys() & _HTTPS_PROTOS:
_schemes.append('HTTPS')
self._schemes = tuple(_schemes)
return self._schemes
@property
def avg_resp_time(self):
"""The average connection/response time.
:rtype: float
"""
if not self._runtimes:
return 0
return round(sum(self._runtimes) / len(self._runtimes), 2)
@property
def avgRespTime(self):
"""
.. deprecated:: 2.0
Use :attr:`avg_resp_time` instead.
"""
warnings.warn(
'`avgRespTime` property is deprecated, '
'use `avg_resp_time` instead.',
DeprecationWarning,
)
return self.avg_resp_time
@property
def geo(self):
"""Geo information about IP address of the proxy.
:return:
Named tuple with fields:
* ``code`` - ISO country code
* ``name`` - Full name of country
* ``region_code`` - ISO region code
* ``region_name`` - Full name of region
* ``city_name`` - Full name of city
:rtype: collections.namedtuple
.. versionchanged:: 0.2.0
In previous versions return a dictionary, now named tuple.
"""
return self._geo
@property
def ngtr(self):
return self._ngtr
@ngtr.setter
def ngtr(self, proto):
self._ngtr = NGTRS[proto](self)
def log(self, msg, stime=0, err=None):
ngtr = self.ngtr.name if self.ngtr else 'INFO'
runtime = time.time() - stime if stime else 0
log.debug(
'{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(
h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime
)
)
trunc = '...' if len(msg) > 58 else ''
msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)
self._log.append((ngtr, msg, runtime))
if err:
self.stat['errors'][err.errmsg] += 1
if runtime and 'timeout' not in msg:
self._runtimes.append(runtime)
def get_log(self):
"""Proxy log.
:return: The proxy log in format: (negotaitor, msg, runtime)
:rtype: tuple
.. versionadded:: 0.2.0
"""
return self._log
async def connect(self, ssl=False):
err = None
msg = '%s' % 'SSL: ' if ssl else ''
stime = time.time()
self.log('%sInitial connection' % msg)
try:
if ssl:
_type = 'ssl'
sock = self._writer['conn'].get_extra_info('socket')
params = {
'ssl': self._ssl_context,
'sock': sock,
'server_hostname': self.host,
}
else:
_type = 'conn'
params = {'host': self.host, 'port': self.port}
self._reader[_type], self._writer[_type] = await asyncio.wait_for(
asyncio.open_connection(**params), timeout=self._timeout
)
except asyncio.TimeoutError:
msg += 'Connection: timeout'
err = ProxyTimeoutError(msg)
raise err
except (ConnectionRefusedError, OSError, _ssl.SSLError):
msg += 'Connection: failed'
err = ProxyConnError(msg)
raise err
# except asyncio.CancelledError:
# log.debug('Cancelled in proxy.connect()')
# raise ProxyConnError()
else:
msg += 'Connection: success'
self._closed = False
finally:
self.stat['requests'] += 1
self.log(msg, stime, err=err)
def close(self):
if self._closed:
return
self._closed = True
if self.writer:
# try:
self.writer.close()
# except RuntimeError:
# print('Try proxy.close() when loop is closed:',
# asyncio.get_event_loop()._closed)
self._reader = {'conn': None, 'ssl': None}
self._writer = {'conn': None, 'ssl': None}
self.log('Connection: closed')
self._ngtr = None
async def send(self, req):
msg, err = '', None
_req = req.encode() if not isinstance(req, bytes) else req
try:
self.writer.write(_req)
await self.writer.drain()
except ConnectionResetError:
msg = '; Sending: failed'
err = ProxySendError(msg)
raise err
finally:
self.log('Request: %s%s' % (req, msg), err=err)
async def recv(self, length=0, head_only=False):
resp, msg, err = b'', '', None
stime = time.time()
try:
resp = await asyncio.wait_for(
self._recv(length, head_only), timeout=self._timeout
)
except asyncio.TimeoutError:
msg = 'Received: timeout'
err = ProxyTimeoutError(msg)
raise err
except (ConnectionResetError, OSError):
msg = 'Received: failed' # (connection is reset by the peer)
err = ProxyRecvError(msg)
raise err
else:
msg = 'Received: %s bytes' % len(resp)
if not resp:
err = ProxyEmptyRecvError(msg)
raise err
finally:
if resp:
msg += ': %s' % resp[:12]
self.log(msg, stime, err=err)
return resp
async def _recv(self, length=0, head_only=False):
resp = b''
if length:
try:
resp = await self.reader.readexactly(length)
except asyncio.IncompleteReadError as e:
resp = e.partial
else:
body_size, body_recv, chunked = 0, 0, None
while not self.reader.at_eof():
line = await self.reader.readline()
resp += line
if body_size:
body_recv += len(line)
if body_recv >= body_size:
break
elif chunked and line == b'0\r\n':
break
elif not body_size and line == b'\r\n':
if head_only:
break
headers = parse_headers(resp)
body_size = int(headers.get('Content-Length', 0))
if not body_size:
chunked = headers.get('Transfer-Encoding') == 'chunked'
return resp
|
constverum/ProxyBroker
|
proxybroker/providers.py
|
Provider.get_proxies
|
python
|
async def get_proxies(self):
log.debug('Try to get proxies from %s' % self.domain)
async with aiohttp.ClientSession(
headers=get_headers(), cookies=self._cookies, loop=self._loop
) as self._session:
await self._pipe()
log.debug(
'%d proxies received from %s: %s'
% (len(self.proxies), self.domain, self.proxies)
)
return self.proxies
|
Receive proxies from the provider and return them.
:return: :attr:`.proxies`
|
train
|
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/providers.py#L70-L86
|
[
"def get_headers(rv=False):\n _rv = str(random.randint(1000, 9999)) if rv else ''\n headers = {\n # 'User-Agent': 'Mozilla/5.0 (X11; U; Linux i386; ru-RU; rv:2.0) Gecko/20100625 Firefox/3.5.11', # noqa\n 'User-Agent': 'PxBroker/%s/%s' % (version, _rv),\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Pragma': 'no-cache',\n 'Cache-control': 'no-cache',\n 'Cookie': 'cookie=ok',\n 'Referer': 'https://www.google.com/',\n }\n return headers if not rv else (headers, _rv)\n",
"async def _pipe(self):\n await self._find_on_page(self.url)\n"
] |
class Provider:
"""Proxy provider.
Provider - a website that publish free public proxy lists.
:param str url: Url of page where to find proxies
:param tuple proto:
(optional) List of the types (protocols) that may be supported
by proxies returned by the provider. Then used as :attr:`Proxy.types`
:param int max_conn:
(optional) The maximum number of concurrent connections on the provider
:param int max_tries:
(optional) The maximum number of attempts to receive response
:param int timeout:
(optional) Timeout of a request in seconds
"""
_pattern = IPPortPatternGlobal
def __init__(
self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None
):
if url:
self.domain = urlparse(url).netloc
self.url = url
self.proto = proto
self._max_tries = max_tries
self._timeout = timeout
self._session = None
self._cookies = {}
self._proxies = set()
# concurrent connections on the current provider
self._sem_provider = asyncio.Semaphore(max_conn)
self._loop = loop or asyncio.get_event_loop()
@property
def proxies(self):
"""Return all found proxies.
:return:
Set of tuples with proxy hosts, ports and types (protocols)
that may be supported (from :attr:`.proto`).
For example:
{('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}
:rtype: set
"""
return self._proxies
@proxies.setter
def proxies(self, new):
new = [(host, port, self.proto) for host, port in new if port]
self._proxies.update(new)
async def _pipe(self):
await self._find_on_page(self.url)
async def _find_on_pages(self, urls):
if not urls:
return
tasks = []
if not isinstance(urls[0], dict):
urls = set(urls)
for url in urls:
if isinstance(url, dict):
tasks.append(self._find_on_page(**url))
else:
tasks.append(self._find_on_page(url))
await asyncio.gather(*tasks)
async def _find_on_page(self, url, data=None, headers=None, method='GET'):
page = await self.get(url, data=data, headers=headers, method=method)
oldcount = len(self.proxies)
try:
received = self.find_proxies(page)
except Exception as e:
received = []
log.error(
'Error when executing find_proxies.'
'Domain: %s; Error: %r' % (self.domain, e)
)
self.proxies = received
added = len(self.proxies) - oldcount
log.debug(
'%d(%d) proxies added(received) from %s'
% (added, len(received), url)
)
async def get(self, url, data=None, headers=None, method='GET'):
for _ in range(self._max_tries):
page = await self._get(
url, data=data, headers=headers, method=method
)
if page:
break
return page
async def _get(self, url, data=None, headers=None, method='GET'):
page = ''
try:
timeout = aiohttp.ClientTimeout(total=self._timeout)
async with self._sem_provider, self._session.request(
method, url, data=data, headers=headers, timeout=timeout
) as resp:
page = await resp.text()
if resp.status != 200:
log.debug(
'url: %s\nheaders: %s\ncookies: %s\npage:\n%s'
% (url, resp.headers, resp.cookies, page)
)
raise BadStatusError('Status: %s' % resp.status)
except (
UnicodeDecodeError,
BadStatusError,
asyncio.TimeoutError,
aiohttp.ClientOSError,
aiohttp.ClientResponseError,
aiohttp.ServerDisconnectedError,
) as e:
page = ''
log.debug('%s is failed. Error: %r;' % (url, e))
return page
def find_proxies(self, page):
return self._find_proxies(page)
def _find_proxies(self, page):
proxies = self._pattern.findall(page)
return proxies
|
constverum/ProxyBroker
|
examples/find_and_save.py
|
save
|
python
|
async def save(proxies, filename):
with open(filename, 'w') as f:
while True:
proxy = await proxies.get()
if proxy is None:
break
proto = 'https' if 'HTTPS' in proxy.types else 'http'
row = '%s://%s:%d\n' % (proto, proxy.host, proxy.port)
f.write(row)
|
Save proxies to a file.
|
train
|
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/examples/find_and_save.py#L8-L17
| null |
"""Find 10 working HTTP(S) proxies and save them to a file."""
import asyncio
from proxybroker import Broker
def main():
proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(
broker.find(types=['HTTP', 'HTTPS'], limit=10),
save(proxies, filename='proxies.txt'),
)
loop = asyncio.get_event_loop()
loop.run_until_complete(tasks)
if __name__ == '__main__':
main()
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py
|
NYUDSegDataLayer.setup
|
python
|
def setup(self, bottom, top):
# config
params = eval(self.param_str)
self.nyud_dir = params['nyud_dir']
self.split = params['split']
self.tops = params['tops']
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# store top data for reshape + forward
self.data = {}
# means
self.mean_bgr = np.array((116.190, 97.203, 92.318), dtype=np.float32)
self.mean_hha = np.array((132.431, 94.076, 118.477), dtype=np.float32)
self.mean_logd = np.array((7.844,), dtype=np.float32)
# tops: check configuration
if len(top) != len(self.tops):
raise Exception("Need to define {} tops for all outputs.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.nyud_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
|
Setup data layer according to parameters:
- nyud_dir: path to NYUDv2 dir
- split: train / val / test
- tops: list of tops to output from {color, depth, hha, label}
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for NYUDv2 semantic segmentation.
example: params = dict(nyud_dir="/path/to/NYUDVOC2011", split="val",
tops=['color', 'hha', 'label'])
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py#L24-L74
| null |
class NYUDSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from NYUDv2
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 40 class task defined by
S. Gupta, R. Girshick, p. Arbelaez, and J. Malik. Learning rich features
from RGB-D images for object detection and segmentation. ECCV 2014.
with 0 as the void label and 1-40 the classes.
Use this to feed data to a fully convolutional network.
"""
def reshape(self, bottom, top):
# load data for tops and reshape tops to fit (1 is the batch dim)
for i, t in enumerate(self.tops):
self.data[t] = self.load(t, self.indices[self.idx])
top[i].reshape(1, *self.data[t].shape)
def forward(self, bottom, top):
# assign output
for i, t in enumerate(self.tops):
top[i].data[...] = self.data[t]
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load(self, top, idx):
if top == 'color':
return self.load_image(idx)
elif top == 'label':
return self.load_label(idx)
elif top == 'depth':
return self.load_depth(idx)
elif top == 'hha':
return self.load_hha(idx)
else:
raise Exception("Unknown output type: {}".format(top))
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/data/images/img_{}.png'.format(self.nyud_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean_bgr
in_ = in_.transpose((2,0,1))
return in_
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
Shift labels so that classes are 0-39 and void is 255 (to ignore it).
The leading singleton dimension is required by the loss.
"""
label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['segmentation'].astype(np.uint8)
label -= 1 # rotate labels
label = label[np.newaxis, ...]
return label
def load_depth(self, idx):
"""
Load pre-processed depth for NYUDv2 segmentation set.
"""
im = Image.open('{}/data/depth/img_{}.png'.format(self.nyud_dir, idx))
d = np.array(im, dtype=np.float32)
d = np.log(d)
d -= self.mean_logd
d = d[np.newaxis, ...]
return d
def load_hha(self, idx):
"""
Load HHA features from Gupta et al. ECCV14.
See https://github.com/s-gupta/rcnn-depth/blob/master/rcnn/saveHHA.m
"""
im = Image.open('{}/data/hha/img_{}.png'.format(self.nyud_dir, idx))
hha = np.array(im, dtype=np.float32)
hha -= self.mean_hha
hha = hha.transpose((2,0,1))
return hha
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py
|
NYUDSegDataLayer.load_image
|
python
|
def load_image(self, idx):
im = Image.open('{}/data/images/img_{}.png'.format(self.nyud_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean_bgr
in_ = in_.transpose((2,0,1))
return in_
|
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py#L110-L123
| null |
class NYUDSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from NYUDv2
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 40 class task defined by
S. Gupta, R. Girshick, p. Arbelaez, and J. Malik. Learning rich features
from RGB-D images for object detection and segmentation. ECCV 2014.
with 0 as the void label and 1-40 the classes.
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- nyud_dir: path to NYUDv2 dir
- split: train / val / test
- tops: list of tops to output from {color, depth, hha, label}
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for NYUDv2 semantic segmentation.
example: params = dict(nyud_dir="/path/to/NYUDVOC2011", split="val",
tops=['color', 'hha', 'label'])
"""
# config
params = eval(self.param_str)
self.nyud_dir = params['nyud_dir']
self.split = params['split']
self.tops = params['tops']
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# store top data for reshape + forward
self.data = {}
# means
self.mean_bgr = np.array((116.190, 97.203, 92.318), dtype=np.float32)
self.mean_hha = np.array((132.431, 94.076, 118.477), dtype=np.float32)
self.mean_logd = np.array((7.844,), dtype=np.float32)
# tops: check configuration
if len(top) != len(self.tops):
raise Exception("Need to define {} tops for all outputs.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.nyud_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load data for tops and reshape tops to fit (1 is the batch dim)
for i, t in enumerate(self.tops):
self.data[t] = self.load(t, self.indices[self.idx])
top[i].reshape(1, *self.data[t].shape)
def forward(self, bottom, top):
# assign output
for i, t in enumerate(self.tops):
top[i].data[...] = self.data[t]
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load(self, top, idx):
if top == 'color':
return self.load_image(idx)
elif top == 'label':
return self.load_label(idx)
elif top == 'depth':
return self.load_depth(idx)
elif top == 'hha':
return self.load_hha(idx)
else:
raise Exception("Unknown output type: {}".format(top))
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
Shift labels so that classes are 0-39 and void is 255 (to ignore it).
The leading singleton dimension is required by the loss.
"""
label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['segmentation'].astype(np.uint8)
label -= 1 # rotate labels
label = label[np.newaxis, ...]
return label
def load_depth(self, idx):
"""
Load pre-processed depth for NYUDv2 segmentation set.
"""
im = Image.open('{}/data/depth/img_{}.png'.format(self.nyud_dir, idx))
d = np.array(im, dtype=np.float32)
d = np.log(d)
d -= self.mean_logd
d = d[np.newaxis, ...]
return d
def load_hha(self, idx):
"""
Load HHA features from Gupta et al. ECCV14.
See https://github.com/s-gupta/rcnn-depth/blob/master/rcnn/saveHHA.m
"""
im = Image.open('{}/data/hha/img_{}.png'.format(self.nyud_dir, idx))
hha = np.array(im, dtype=np.float32)
hha -= self.mean_hha
hha = hha.transpose((2,0,1))
return hha
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py
|
NYUDSegDataLayer.load_label
|
python
|
def load_label(self, idx):
label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['segmentation'].astype(np.uint8)
label -= 1 # rotate labels
label = label[np.newaxis, ...]
return label
|
Load label image as 1 x height x width integer array of label indices.
Shift labels so that classes are 0-39 and void is 255 (to ignore it).
The leading singleton dimension is required by the loss.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py#L125-L134
| null |
class NYUDSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from NYUDv2
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 40 class task defined by
S. Gupta, R. Girshick, p. Arbelaez, and J. Malik. Learning rich features
from RGB-D images for object detection and segmentation. ECCV 2014.
with 0 as the void label and 1-40 the classes.
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- nyud_dir: path to NYUDv2 dir
- split: train / val / test
- tops: list of tops to output from {color, depth, hha, label}
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for NYUDv2 semantic segmentation.
example: params = dict(nyud_dir="/path/to/NYUDVOC2011", split="val",
tops=['color', 'hha', 'label'])
"""
# config
params = eval(self.param_str)
self.nyud_dir = params['nyud_dir']
self.split = params['split']
self.tops = params['tops']
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# store top data for reshape + forward
self.data = {}
# means
self.mean_bgr = np.array((116.190, 97.203, 92.318), dtype=np.float32)
self.mean_hha = np.array((132.431, 94.076, 118.477), dtype=np.float32)
self.mean_logd = np.array((7.844,), dtype=np.float32)
# tops: check configuration
if len(top) != len(self.tops):
raise Exception("Need to define {} tops for all outputs.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.nyud_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load data for tops and reshape tops to fit (1 is the batch dim)
for i, t in enumerate(self.tops):
self.data[t] = self.load(t, self.indices[self.idx])
top[i].reshape(1, *self.data[t].shape)
def forward(self, bottom, top):
# assign output
for i, t in enumerate(self.tops):
top[i].data[...] = self.data[t]
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load(self, top, idx):
if top == 'color':
return self.load_image(idx)
elif top == 'label':
return self.load_label(idx)
elif top == 'depth':
return self.load_depth(idx)
elif top == 'hha':
return self.load_hha(idx)
else:
raise Exception("Unknown output type: {}".format(top))
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/data/images/img_{}.png'.format(self.nyud_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean_bgr
in_ = in_.transpose((2,0,1))
return in_
def load_depth(self, idx):
"""
Load pre-processed depth for NYUDv2 segmentation set.
"""
im = Image.open('{}/data/depth/img_{}.png'.format(self.nyud_dir, idx))
d = np.array(im, dtype=np.float32)
d = np.log(d)
d -= self.mean_logd
d = d[np.newaxis, ...]
return d
def load_hha(self, idx):
"""
Load HHA features from Gupta et al. ECCV14.
See https://github.com/s-gupta/rcnn-depth/blob/master/rcnn/saveHHA.m
"""
im = Image.open('{}/data/hha/img_{}.png'.format(self.nyud_dir, idx))
hha = np.array(im, dtype=np.float32)
hha -= self.mean_hha
hha = hha.transpose((2,0,1))
return hha
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py
|
NYUDSegDataLayer.load_depth
|
python
|
def load_depth(self, idx):
im = Image.open('{}/data/depth/img_{}.png'.format(self.nyud_dir, idx))
d = np.array(im, dtype=np.float32)
d = np.log(d)
d -= self.mean_logd
d = d[np.newaxis, ...]
return d
|
Load pre-processed depth for NYUDv2 segmentation set.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py#L136-L145
| null |
class NYUDSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from NYUDv2
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 40 class task defined by
S. Gupta, R. Girshick, p. Arbelaez, and J. Malik. Learning rich features
from RGB-D images for object detection and segmentation. ECCV 2014.
with 0 as the void label and 1-40 the classes.
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- nyud_dir: path to NYUDv2 dir
- split: train / val / test
- tops: list of tops to output from {color, depth, hha, label}
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for NYUDv2 semantic segmentation.
example: params = dict(nyud_dir="/path/to/NYUDVOC2011", split="val",
tops=['color', 'hha', 'label'])
"""
# config
params = eval(self.param_str)
self.nyud_dir = params['nyud_dir']
self.split = params['split']
self.tops = params['tops']
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# store top data for reshape + forward
self.data = {}
# means
self.mean_bgr = np.array((116.190, 97.203, 92.318), dtype=np.float32)
self.mean_hha = np.array((132.431, 94.076, 118.477), dtype=np.float32)
self.mean_logd = np.array((7.844,), dtype=np.float32)
# tops: check configuration
if len(top) != len(self.tops):
raise Exception("Need to define {} tops for all outputs.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.nyud_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load data for tops and reshape tops to fit (1 is the batch dim)
for i, t in enumerate(self.tops):
self.data[t] = self.load(t, self.indices[self.idx])
top[i].reshape(1, *self.data[t].shape)
def forward(self, bottom, top):
# assign output
for i, t in enumerate(self.tops):
top[i].data[...] = self.data[t]
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load(self, top, idx):
if top == 'color':
return self.load_image(idx)
elif top == 'label':
return self.load_label(idx)
elif top == 'depth':
return self.load_depth(idx)
elif top == 'hha':
return self.load_hha(idx)
else:
raise Exception("Unknown output type: {}".format(top))
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/data/images/img_{}.png'.format(self.nyud_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean_bgr
in_ = in_.transpose((2,0,1))
return in_
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
Shift labels so that classes are 0-39 and void is 255 (to ignore it).
The leading singleton dimension is required by the loss.
"""
label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['segmentation'].astype(np.uint8)
label -= 1 # rotate labels
label = label[np.newaxis, ...]
return label
def load_hha(self, idx):
"""
Load HHA features from Gupta et al. ECCV14.
See https://github.com/s-gupta/rcnn-depth/blob/master/rcnn/saveHHA.m
"""
im = Image.open('{}/data/hha/img_{}.png'.format(self.nyud_dir, idx))
hha = np.array(im, dtype=np.float32)
hha -= self.mean_hha
hha = hha.transpose((2,0,1))
return hha
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py
|
NYUDSegDataLayer.load_hha
|
python
|
def load_hha(self, idx):
im = Image.open('{}/data/hha/img_{}.png'.format(self.nyud_dir, idx))
hha = np.array(im, dtype=np.float32)
hha -= self.mean_hha
hha = hha.transpose((2,0,1))
return hha
|
Load HHA features from Gupta et al. ECCV14.
See https://github.com/s-gupta/rcnn-depth/blob/master/rcnn/saveHHA.m
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/nyud_layers.py#L147-L156
| null |
class NYUDSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from NYUDv2
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 40 class task defined by
S. Gupta, R. Girshick, p. Arbelaez, and J. Malik. Learning rich features
from RGB-D images for object detection and segmentation. ECCV 2014.
with 0 as the void label and 1-40 the classes.
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- nyud_dir: path to NYUDv2 dir
- split: train / val / test
- tops: list of tops to output from {color, depth, hha, label}
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for NYUDv2 semantic segmentation.
example: params = dict(nyud_dir="/path/to/NYUDVOC2011", split="val",
tops=['color', 'hha', 'label'])
"""
# config
params = eval(self.param_str)
self.nyud_dir = params['nyud_dir']
self.split = params['split']
self.tops = params['tops']
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# store top data for reshape + forward
self.data = {}
# means
self.mean_bgr = np.array((116.190, 97.203, 92.318), dtype=np.float32)
self.mean_hha = np.array((132.431, 94.076, 118.477), dtype=np.float32)
self.mean_logd = np.array((7.844,), dtype=np.float32)
# tops: check configuration
if len(top) != len(self.tops):
raise Exception("Need to define {} tops for all outputs.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.nyud_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load data for tops and reshape tops to fit (1 is the batch dim)
for i, t in enumerate(self.tops):
self.data[t] = self.load(t, self.indices[self.idx])
top[i].reshape(1, *self.data[t].shape)
def forward(self, bottom, top):
# assign output
for i, t in enumerate(self.tops):
top[i].data[...] = self.data[t]
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load(self, top, idx):
if top == 'color':
return self.load_image(idx)
elif top == 'label':
return self.load_label(idx)
elif top == 'depth':
return self.load_depth(idx)
elif top == 'hha':
return self.load_hha(idx)
else:
raise Exception("Unknown output type: {}".format(top))
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/data/images/img_{}.png'.format(self.nyud_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean_bgr
in_ = in_.transpose((2,0,1))
return in_
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
Shift labels so that classes are 0-39 and void is 255 (to ignore it).
The leading singleton dimension is required by the loss.
"""
label = scipy.io.loadmat('{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx))['segmentation'].astype(np.uint8)
label -= 1 # rotate labels
label = label[np.newaxis, ...]
return label
def load_depth(self, idx):
"""
Load pre-processed depth for NYUDv2 segmentation set.
"""
im = Image.open('{}/data/depth/img_{}.png'.format(self.nyud_dir, idx))
d = np.array(im, dtype=np.float32)
d = np.log(d)
d -= self.mean_logd
d = d[np.newaxis, ...]
return d
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/siftflow_layers.py
|
SIFTFlowSegDataLayer.load_image
|
python
|
def load_image(self, idx):
im = Image.open('{}/Images/spatial_envelope_256x256_static_8outdoorcategories/{}.jpg'.format(self.siftflow_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean
in_ = in_.transpose((2,0,1))
return in_
|
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/siftflow_layers.py#L92-L105
| null |
class SIFTFlowSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from SIFT Flow
one-at-a-time while reshaping the net to preserve dimensions.
This data layer has three tops:
1. the data, pre-processed
2. the semantic labels 0-32 and void 255
3. the geometric labels 0-2 and void 255
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- siftflow_dir: path to SIFT Flow dir
- split: train / val / test
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for semantic segmentation of object and geometric classes.
example: params = dict(siftflow_dir="/path/to/siftflow", split="val")
"""
# config
params = eval(self.param_str)
self.siftflow_dir = params['siftflow_dir']
self.split = params['split']
self.mean = np.array((114.578, 115.294, 108.353), dtype=np.float32)
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# three tops: data, semantic, geometric
if len(top) != 3:
raise Exception("Need to define three tops: data, semantic label, and geometric label.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.siftflow_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load image + label image pair
self.data = self.load_image(self.indices[self.idx])
self.label_semantic = self.load_label(self.indices[self.idx], label_type='semantic')
self.label_geometric = self.load_label(self.indices[self.idx], label_type='geometric')
# reshape tops to fit (leading 1 is for batch dimension)
top[0].reshape(1, *self.data.shape)
top[1].reshape(1, *self.label_semantic.shape)
top[2].reshape(1, *self.label_geometric.shape)
def forward(self, bottom, top):
# assign output
top[0].data[...] = self.data
top[1].data[...] = self.label_semantic
top[2].data[...] = self.label_geometric
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load_label(self, idx, label_type=None):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
if label_type == 'semantic':
label = scipy.io.loadmat('{}/SemanticLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S']
elif label_type == 'geometric':
label = scipy.io.loadmat('{}/GeoLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S']
label[label == -1] = 0
else:
raise Exception("Unknown label type: {}. Pick semantic or geometric.".format(label_type))
label = label.astype(np.uint8)
label -= 1 # rotate labels so classes start at 0, void is 255
label = label[np.newaxis, ...]
return label.copy()
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/siftflow_layers.py
|
SIFTFlowSegDataLayer.load_label
|
python
|
def load_label(self, idx, label_type=None):
if label_type == 'semantic':
label = scipy.io.loadmat('{}/SemanticLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S']
elif label_type == 'geometric':
label = scipy.io.loadmat('{}/GeoLabels/spatial_envelope_256x256_static_8outdoorcategories/{}.mat'.format(self.siftflow_dir, idx))['S']
label[label == -1] = 0
else:
raise Exception("Unknown label type: {}. Pick semantic or geometric.".format(label_type))
label = label.astype(np.uint8)
label -= 1 # rotate labels so classes start at 0, void is 255
label = label[np.newaxis, ...]
return label.copy()
|
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/siftflow_layers.py#L107-L122
| null |
class SIFTFlowSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from SIFT Flow
one-at-a-time while reshaping the net to preserve dimensions.
This data layer has three tops:
1. the data, pre-processed
2. the semantic labels 0-32 and void 255
3. the geometric labels 0-2 and void 255
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- siftflow_dir: path to SIFT Flow dir
- split: train / val / test
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for semantic segmentation of object and geometric classes.
example: params = dict(siftflow_dir="/path/to/siftflow", split="val")
"""
# config
params = eval(self.param_str)
self.siftflow_dir = params['siftflow_dir']
self.split = params['split']
self.mean = np.array((114.578, 115.294, 108.353), dtype=np.float32)
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# three tops: data, semantic, geometric
if len(top) != 3:
raise Exception("Need to define three tops: data, semantic label, and geometric label.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/{}.txt'.format(self.siftflow_dir, self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load image + label image pair
self.data = self.load_image(self.indices[self.idx])
self.label_semantic = self.load_label(self.indices[self.idx], label_type='semantic')
self.label_geometric = self.load_label(self.indices[self.idx], label_type='geometric')
# reshape tops to fit (leading 1 is for batch dimension)
top[0].reshape(1, *self.data.shape)
top[1].reshape(1, *self.label_semantic.shape)
top[2].reshape(1, *self.label_geometric.shape)
def forward(self, bottom, top):
# assign output
top[0].data[...] = self.data
top[1].data[...] = self.label_semantic
top[2].data[...] = self.label_geometric
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/Images/spatial_envelope_256x256_static_8outdoorcategories/{}.jpg'.format(self.siftflow_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean
in_ = in_.transpose((2,0,1))
return in_
|
wkentaro/pytorch-fcn
|
torchfcn/models/fcn32s.py
|
get_upsampling_weight
|
python
|
def get_upsampling_weight(in_channels, out_channels, kernel_size):
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
|
Make a 2D bilinear kernel suitable for upsampling
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/models/fcn32s.py#L10-L23
| null |
import os.path as osp
import fcn
import numpy as np
import torch
import torch.nn as nn
# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
class FCN32s(nn.Module):
pretrained_model = \
osp.expanduser('~/data/models/pytorch/fcn32s_from_caffe.pth')
@classmethod
def download(cls):
return fcn.data.cached_download(
url='http://drive.google.com/uc?id=0B9P1L--7Wd2vM2oya3k0Zlgtekk',
path=cls.pretrained_model,
md5='8acf386d722dc3484625964cbe2aba49',
)
def __init__(self, n_class=21):
super(FCN32s, self).__init__()
# conv1
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2
# conv2
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4
# conv3
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8
# conv4
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16
# conv5
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/32
# fc6
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
# fc7
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.upscore = nn.ConvTranspose2d(n_class, n_class, 64, stride=32,
bias=False)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.zero_()
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
initial_weight = get_upsampling_weight(
m.in_channels, m.out_channels, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore(h)
h = h[:, :, 19:19 + x.size()[2], 19:19 + x.size()[3]].contiguous()
return h
def copy_params_from_vgg16(self, vgg16):
features = [
self.conv1_1, self.relu1_1,
self.conv1_2, self.relu1_2,
self.pool1,
self.conv2_1, self.relu2_1,
self.conv2_2, self.relu2_2,
self.pool2,
self.conv3_1, self.relu3_1,
self.conv3_2, self.relu3_2,
self.conv3_3, self.relu3_3,
self.pool3,
self.conv4_1, self.relu4_1,
self.conv4_2, self.relu4_2,
self.conv4_3, self.relu4_3,
self.pool4,
self.conv5_1, self.relu5_1,
self.conv5_2, self.relu5_2,
self.conv5_3, self.relu5_3,
self.pool5,
]
for l1, l2 in zip(vgg16.features, features):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
for i, name in zip([0, 3], ['fc6', 'fc7']):
l1 = vgg16.classifier[i]
l2 = getattr(self, name)
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())
|
wkentaro/pytorch-fcn
|
torchfcn/utils.py
|
label_accuracy_score
|
python
|
def label_accuracy_score(label_trues, label_preds, n_class):
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
acc = np.diag(hist).sum() / hist.sum()
with np.errstate(divide='ignore', invalid='ignore'):
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
with np.errstate(divide='ignore', invalid='ignore'):
iu = np.diag(hist) / (
hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
)
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc
|
Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/utils.py#L12-L34
|
[
"def _fast_hist(label_true, label_pred, n_class):\n mask = (label_true >= 0) & (label_true < n_class)\n hist = np.bincount(\n n_class * label_true[mask].astype(int) +\n label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)\n return hist\n"
] |
import numpy as np
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) +
label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
return hist
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/surgery.py
|
transplant
|
python
|
def transplant(new_net, net, suffix=''):
for p in net.params:
p_new = p + suffix
if p_new not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if i > (len(new_net.params[p_new]) - 1):
print 'dropping', p, i
break
if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p_new][i].data.shape
else:
print 'copying', p, ' -> ', p_new, i
new_net.params[p_new][i].data.flat = net.params[p][i].data.flat
|
Transfer weights by copying matching parameters, coercing parameters of
incompatible shape, and dropping unmatched parameters.
The coercion is useful to convert fully connected layers to their
equivalent convolutional layers, since the weights are the same and only
the shapes are different. In particular, equivalent fully connected and
convolution layers have shapes O x I and O x I x H x W respectively for O
outputs channels, I input channels, H kernel height, and W kernel width.
Both `net` to `new_net` arguments must be instantiated `caffe.Net`s.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/surgery.py#L5-L31
| null |
from __future__ import division
import caffe
import numpy as np
def upsample_filt(size):
"""
Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
def interp(net, layers):
"""
Set weights of each layer in layers to bilinear kernels for interpolation.
"""
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
if h != w:
print 'filters need to be square'
raise
filt = upsample_filt(h)
net.params[l][0].data[range(m), range(k), :, :] = filt
def expand_score(new_net, new_layer, net, layer):
"""
Transplant an old score layer's parameters, with k < k' classes, into a new
score layer with k classes s.t. the first k' are the old classes.
"""
old_cl = net.params[layer][0].num
new_net.params[new_layer][0].data[:old_cl][...] = net.params[layer][0].data
new_net.params[new_layer][1].data[0,0,0,:old_cl][...] = net.params[layer][1].data
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/surgery.py
|
interp
|
python
|
def interp(net, layers):
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
if h != w:
print 'filters need to be square'
raise
filt = upsample_filt(h)
net.params[l][0].data[range(m), range(k), :, :] = filt
|
Set weights of each layer in layers to bilinear kernels for interpolation.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/surgery.py#L46-L59
|
[
"def upsample_filt(size):\n \"\"\"\n Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.\n \"\"\"\n factor = (size + 1) // 2\n if size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:size, :size]\n return (1 - abs(og[0] - center) / factor) * \\\n (1 - abs(og[1] - center) / factor)\n"
] |
from __future__ import division
import caffe
import numpy as np
def transplant(new_net, net, suffix=''):
"""
Transfer weights by copying matching parameters, coercing parameters of
incompatible shape, and dropping unmatched parameters.
The coercion is useful to convert fully connected layers to their
equivalent convolutional layers, since the weights are the same and only
the shapes are different. In particular, equivalent fully connected and
convolution layers have shapes O x I and O x I x H x W respectively for O
outputs channels, I input channels, H kernel height, and W kernel width.
Both `net` to `new_net` arguments must be instantiated `caffe.Net`s.
"""
for p in net.params:
p_new = p + suffix
if p_new not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if i > (len(new_net.params[p_new]) - 1):
print 'dropping', p, i
break
if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p_new][i].data.shape
else:
print 'copying', p, ' -> ', p_new, i
new_net.params[p_new][i].data.flat = net.params[p][i].data.flat
def upsample_filt(size):
"""
Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
def expand_score(new_net, new_layer, net, layer):
"""
Transplant an old score layer's parameters, with k < k' classes, into a new
score layer with k classes s.t. the first k' are the old classes.
"""
old_cl = net.params[layer][0].num
new_net.params[new_layer][0].data[:old_cl][...] = net.params[layer][0].data
new_net.params[new_layer][1].data[0,0,0,:old_cl][...] = net.params[layer][1].data
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/surgery.py
|
expand_score
|
python
|
def expand_score(new_net, new_layer, net, layer):
old_cl = net.params[layer][0].num
new_net.params[new_layer][0].data[:old_cl][...] = net.params[layer][0].data
new_net.params[new_layer][1].data[0,0,0,:old_cl][...] = net.params[layer][1].data
|
Transplant an old score layer's parameters, with k < k' classes, into a new
score layer with k classes s.t. the first k' are the old classes.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/surgery.py#L61-L68
| null |
from __future__ import division
import caffe
import numpy as np
def transplant(new_net, net, suffix=''):
"""
Transfer weights by copying matching parameters, coercing parameters of
incompatible shape, and dropping unmatched parameters.
The coercion is useful to convert fully connected layers to their
equivalent convolutional layers, since the weights are the same and only
the shapes are different. In particular, equivalent fully connected and
convolution layers have shapes O x I and O x I x H x W respectively for O
outputs channels, I input channels, H kernel height, and W kernel width.
Both `net` to `new_net` arguments must be instantiated `caffe.Net`s.
"""
for p in net.params:
p_new = p + suffix
if p_new not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if i > (len(new_net.params[p_new]) - 1):
print 'dropping', p, i
break
if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape:
print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p_new][i].data.shape
else:
print 'copying', p, ' -> ', p_new, i
new_net.params[p_new][i].data.flat = net.params[p][i].data.flat
def upsample_filt(size):
"""
Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
def interp(net, layers):
"""
Set weights of each layer in layers to bilinear kernels for interpolation.
"""
for l in layers:
m, k, h, w = net.params[l][0].data.shape
if m != k and k != 1:
print 'input + output channels need to be the same or |output| == 1'
raise
if h != w:
print 'filters need to be square'
raise
filt = upsample_filt(h)
net.params[l][0].data[range(m), range(k), :, :] = filt
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/voc_layers.py
|
VOCSegDataLayer.setup
|
python
|
def setup(self, bottom, top):
# config
params = eval(self.param_str)
self.voc_dir = params['voc_dir']
self.split = params['split']
self.mean = np.array(params['mean'])
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# two tops: data and label
if len(top) != 2:
raise Exception("Need to define two tops: data and label.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/ImageSets/Segmentation/{}.txt'.format(self.voc_dir,
self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
|
Setup data layer according to parameters:
- voc_dir: path to PASCAL VOC year dir
- split: train / val / test
- mean: tuple of mean values to subtract
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for PASCAL VOC semantic segmentation.
example
params = dict(voc_dir="/path/to/PASCAL/VOC2011",
mean=(104.00698793, 116.66876762, 122.67891434),
split="val")
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/voc_layers.py#L16-L62
| null |
class VOCSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from PASCAL VOC
one-at-a-time while reshaping the net to preserve dimensions.
Use this to feed data to a fully convolutional network.
"""
def reshape(self, bottom, top):
# load image + label image pair
self.data = self.load_image(self.indices[self.idx])
self.label = self.load_label(self.indices[self.idx])
# reshape tops to fit (leading 1 is for batch dimension)
top[0].reshape(1, *self.data.shape)
top[1].reshape(1, *self.label.shape)
def forward(self, bottom, top):
# assign output
top[0].data[...] = self.data
top[1].data[...] = self.label
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/JPEGImages/{}.jpg'.format(self.voc_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean
in_ = in_.transpose((2,0,1))
return in_
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
im = Image.open('{}/SegmentationClass/{}.png'.format(self.voc_dir, idx))
label = np.array(im, dtype=np.uint8)
label = label[np.newaxis, ...]
return label
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/pascalcontext_layers.py
|
PASCALContextSegDataLayer.setup
|
python
|
def setup(self, bottom, top):
# config
params = eval(self.param_str)
self.voc_dir = params['voc_dir'] + '/VOC2010'
self.context_dir = params['context_dir']
self.split = params['split']
self.mean = np.array((104.007, 116.669, 122.679), dtype=np.float32)
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# load labels and resolve inconsistencies by mapping to full 400 labels
self.labels_400 = [label.replace(' ','') for idx, label in np.genfromtxt(self.context_dir + '/labels.txt', delimiter=':', dtype=None)]
self.labels_59 = [label.replace(' ','') for idx, label in np.genfromtxt(self.context_dir + '/59_labels.txt', delimiter=':', dtype=None)]
for main_label, task_label in zip(('table', 'bedclothes', 'cloth'), ('diningtable', 'bedcloth', 'clothes')):
self.labels_59[self.labels_59.index(task_label)] = main_label
# two tops: data and label
if len(top) != 2:
raise Exception("Need to define two tops: data and label.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/ImageSets/Main/{}.txt'.format(self.voc_dir,
self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
|
Setup data layer according to parameters:
- voc_dir: path to PASCAL VOC dir (must contain 2010)
- context_dir: path to PASCAL-Context annotations
- split: train / val / test
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for PASCAL-Context semantic segmentation.
example: params = dict(voc_dir="/path/to/PASCAL", split="val")
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/pascalcontext_layers.py#L23-L72
| null |
class PASCALContextSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from PASCAL-Context
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 59 class task defined by
R. Mottaghi, X. Chen, X. Liu, N.-G. Cho, S.-W. Lee, S. Fidler, R.
Urtasun, and A. Yuille. The Role of Context for Object Detection and
Semantic Segmentation in the Wild. CVPR 2014.
Use this to feed data to a fully convolutional network.
"""
def reshape(self, bottom, top):
# load image + label image pair
self.data = self.load_image(self.indices[self.idx])
self.label = self.load_label(self.indices[self.idx])
# reshape tops to fit (leading 1 is for batch dimension)
top[0].reshape(1, *self.data.shape)
top[1].reshape(1, *self.label.shape)
def forward(self, bottom, top):
# assign output
top[0].data[...] = self.data
top[1].data[...] = self.label
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/JPEGImages/{}.jpg'.format(self.voc_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean
in_ = in_.transpose((2,0,1))
return in_
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
The full 400 labels are translated to the 59 class task labels.
"""
label_400 = scipy.io.loadmat('{}/trainval/{}.mat'.format(self.context_dir, idx))['LabelMap']
label = np.zeros_like(label_400, dtype=np.uint8)
for idx, l in enumerate(self.labels_59):
idx_400 = self.labels_400.index(l) + 1
label[label_400 == idx_400] = idx + 1
label = label[np.newaxis, ...]
return label
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/pascalcontext_layers.py
|
PASCALContextSegDataLayer.load_label
|
python
|
def load_label(self, idx):
label_400 = scipy.io.loadmat('{}/trainval/{}.mat'.format(self.context_dir, idx))['LabelMap']
label = np.zeros_like(label_400, dtype=np.uint8)
for idx, l in enumerate(self.labels_59):
idx_400 = self.labels_400.index(l) + 1
label[label_400 == idx_400] = idx + 1
label = label[np.newaxis, ...]
return label
|
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
The full 400 labels are translated to the 59 class task labels.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/pascalcontext_layers.py#L113-L125
| null |
class PASCALContextSegDataLayer(caffe.Layer):
"""
Load (input image, label image) pairs from PASCAL-Context
one-at-a-time while reshaping the net to preserve dimensions.
The labels follow the 59 class task defined by
R. Mottaghi, X. Chen, X. Liu, N.-G. Cho, S.-W. Lee, S. Fidler, R.
Urtasun, and A. Yuille. The Role of Context for Object Detection and
Semantic Segmentation in the Wild. CVPR 2014.
Use this to feed data to a fully convolutional network.
"""
def setup(self, bottom, top):
"""
Setup data layer according to parameters:
- voc_dir: path to PASCAL VOC dir (must contain 2010)
- context_dir: path to PASCAL-Context annotations
- split: train / val / test
- randomize: load in random order (default: True)
- seed: seed for randomization (default: None / current time)
for PASCAL-Context semantic segmentation.
example: params = dict(voc_dir="/path/to/PASCAL", split="val")
"""
# config
params = eval(self.param_str)
self.voc_dir = params['voc_dir'] + '/VOC2010'
self.context_dir = params['context_dir']
self.split = params['split']
self.mean = np.array((104.007, 116.669, 122.679), dtype=np.float32)
self.random = params.get('randomize', True)
self.seed = params.get('seed', None)
# load labels and resolve inconsistencies by mapping to full 400 labels
self.labels_400 = [label.replace(' ','') for idx, label in np.genfromtxt(self.context_dir + '/labels.txt', delimiter=':', dtype=None)]
self.labels_59 = [label.replace(' ','') for idx, label in np.genfromtxt(self.context_dir + '/59_labels.txt', delimiter=':', dtype=None)]
for main_label, task_label in zip(('table', 'bedclothes', 'cloth'), ('diningtable', 'bedcloth', 'clothes')):
self.labels_59[self.labels_59.index(task_label)] = main_label
# two tops: data and label
if len(top) != 2:
raise Exception("Need to define two tops: data and label.")
# data layers have no bottoms
if len(bottom) != 0:
raise Exception("Do not define a bottom.")
# load indices for images and labels
split_f = '{}/ImageSets/Main/{}.txt'.format(self.voc_dir,
self.split)
self.indices = open(split_f, 'r').read().splitlines()
self.idx = 0
# make eval deterministic
if 'train' not in self.split:
self.random = False
# randomization: seed and pick
if self.random:
random.seed(self.seed)
self.idx = random.randint(0, len(self.indices)-1)
def reshape(self, bottom, top):
# load image + label image pair
self.data = self.load_image(self.indices[self.idx])
self.label = self.load_label(self.indices[self.idx])
# reshape tops to fit (leading 1 is for batch dimension)
top[0].reshape(1, *self.data.shape)
top[1].reshape(1, *self.label.shape)
def forward(self, bottom, top):
# assign output
top[0].data[...] = self.data
top[1].data[...] = self.label
# pick next input
if self.random:
self.idx = random.randint(0, len(self.indices)-1)
else:
self.idx += 1
if self.idx == len(self.indices):
self.idx = 0
def backward(self, top, propagate_down, bottom):
pass
def load_image(self, idx):
"""
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
"""
im = Image.open('{}/JPEGImages/{}.jpg'.format(self.voc_dir, idx))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
in_ -= self.mean
in_ = in_.transpose((2,0,1))
return in_
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/voc_helper.py
|
voc.load_label
|
python
|
def load_label(self, idx):
label = Image.open('{}/SegmentationClass/{}.png'.format(self.dir, idx))
label = np.array(label, dtype=np.uint8)
label = label[np.newaxis, ...]
return label
|
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/voc_helper.py#L27-L35
| null |
class voc:
def __init__(self, data_path):
# data_path is /path/to/PASCAL/VOC2011
self.dir = data_path
self.classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
# for paletting
reference_idx = '2008_000666'
palette_im = Image.open('{}/SegmentationClass/{}.png'.format(
self.dir, reference_idx))
self.palette = palette_im.palette
def load_image(self, idx):
im = Image.open('{}/JPEGImages/{}.jpg'.format(self.dir, idx))
return im
def palette(self, label_im):
'''
Transfer the VOC color palette to an output mask for visualization.
'''
if label_im.ndim == 3:
label_im = label_im[0]
label = Image.fromarray(label_im, mode='P')
label.palette = copy.copy(self.palette)
return label
|
wkentaro/pytorch-fcn
|
torchfcn/ext/fcn.berkeleyvision.org/voc_helper.py
|
voc.palette
|
python
|
def palette(self, label_im):
'''
Transfer the VOC color palette to an output mask for visualization.
'''
if label_im.ndim == 3:
label_im = label_im[0]
label = Image.fromarray(label_im, mode='P')
label.palette = copy.copy(self.palette)
return label
|
Transfer the VOC color palette to an output mask for visualization.
|
train
|
https://github.com/wkentaro/pytorch-fcn/blob/97189cbccb2c9b8bd776b356a1fd4b6c03f67d79/torchfcn/ext/fcn.berkeleyvision.org/voc_helper.py#L37-L45
| null |
class voc:
def __init__(self, data_path):
# data_path is /path/to/PASCAL/VOC2011
self.dir = data_path
self.classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
# for paletting
reference_idx = '2008_000666'
palette_im = Image.open('{}/SegmentationClass/{}.png'.format(
self.dir, reference_idx))
self.palette = palette_im.palette
def load_image(self, idx):
im = Image.open('{}/JPEGImages/{}.jpg'.format(self.dir, idx))
return im
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
"""
label = Image.open('{}/SegmentationClass/{}.png'.format(self.dir, idx))
label = np.array(label, dtype=np.uint8)
label = label[np.newaxis, ...]
return label
|
msiemens/tinydb
|
tinydb/database.py
|
TinyDB.table
|
python
|
def table(self, name=DEFAULT_TABLE, **options):
if name in self._table_cache:
return self._table_cache[name]
table_class = options.pop('table_class', self._cls_table)
table = table_class(self._cls_storage_proxy(self._storage, name), name, **options)
self._table_cache[name] = table
return table
|
Get access to a specific table.
Creates a new table, if it hasn't been created before, otherwise it
returns the cached :class:`~tinydb.Table` object.
:param name: The name of the table.
:type name: str
:param cache_size: How many query results to cache.
:param table_class: Which table class to use.
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L179-L200
| null |
class TinyDB(object):
"""
The main class of TinyDB.
Gives access to the database, provides methods to insert/search/remove
and getting tables.
"""
DEFAULT_TABLE = '_default'
DEFAULT_STORAGE = JSONStorage
def __init__(self, *args, **kwargs):
"""
Create a new instance of TinyDB.
All arguments and keyword arguments will be passed to the underlying
storage class (default: :class:`~tinydb.storages.JSONStorage`).
:param storage: The class of the storage to use. Will be initialized
with ``args`` and ``kwargs``.
:param default_table: The name of the default table to populate.
"""
storage = kwargs.pop('storage', self.DEFAULT_STORAGE)
default_table = kwargs.pop('default_table', self.DEFAULT_TABLE)
self._cls_table = kwargs.pop('table_class', self.table_class)
self._cls_storage_proxy = kwargs.pop('storage_proxy_class',
self.storage_proxy_class)
# Prepare the storage
#: :type: Storage
self._storage = storage(*args, **kwargs)
self._opened = True
# Prepare the default table
self._table_cache = {}
self._table = self.table(default_table)
def __repr__(self):
args = [
'tables={}'.format(list(self.tables())),
'tables_count={}'.format(len(self.tables())),
'default_table_documents_count={}'.format(self.__len__()),
'all_tables_documents_count={}'.format(
['{}={}'.format(table, len(self.table(table)))
for table in self.tables()]),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def tables(self):
"""
Get the names of all tables in the database.
:returns: a set of table names
:rtype: set[str]
"""
return set(self._storage.read())
def purge_tables(self):
"""
Purge all tables from the database. **CANNOT BE REVERSED!**
"""
self._storage.write({})
self._table_cache.clear()
def purge_table(self, name):
"""
Purge a specific table from the database. **CANNOT BE REVERSED!**
:param name: The name of the table.
:type name: str
"""
if name in self._table_cache:
del self._table_cache[name]
proxy = StorageProxy(self._storage, name)
proxy.purge_table()
@property
def storage(self):
"""
Access the storage used for this TinyDB instance.
:return: This instance's storage
"""
return self._storage
def close(self):
"""
Close the database.
"""
self._opened = False
self._storage.close()
def __enter__(self):
return self
def __exit__(self, *args):
if self._opened:
self.close()
def __getattr__(self, name):
"""
Forward all unknown attribute calls to the underlying standard table.
"""
return getattr(self._table, name)
# Methods that are executed on the default table
# Because magic methods are not handled by __getattr__ we need to forward
# them manually here
def __len__(self):
"""
Get the total number of documents in the default table.
>>> db = TinyDB('db.json')
>>> len(db)
0
"""
return len(self._table)
def __iter__(self):
"""
Iter over all documents from default table.
"""
return self._table.__iter__()
|
msiemens/tinydb
|
tinydb/database.py
|
TinyDB.purge_table
|
python
|
def purge_table(self, name):
if name in self._table_cache:
del self._table_cache[name]
proxy = StorageProxy(self._storage, name)
proxy.purge_table()
|
Purge a specific table from the database. **CANNOT BE REVERSED!**
:param name: The name of the table.
:type name: str
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L220-L231
|
[
"def purge_table(self):\n try:\n data = self._storage.read() or {}\n del data[self._table_name]\n self._storage.write(data)\n except KeyError:\n pass\n"
] |
class TinyDB(object):
"""
The main class of TinyDB.
Gives access to the database, provides methods to insert/search/remove
and getting tables.
"""
DEFAULT_TABLE = '_default'
DEFAULT_STORAGE = JSONStorage
def __init__(self, *args, **kwargs):
"""
Create a new instance of TinyDB.
All arguments and keyword arguments will be passed to the underlying
storage class (default: :class:`~tinydb.storages.JSONStorage`).
:param storage: The class of the storage to use. Will be initialized
with ``args`` and ``kwargs``.
:param default_table: The name of the default table to populate.
"""
storage = kwargs.pop('storage', self.DEFAULT_STORAGE)
default_table = kwargs.pop('default_table', self.DEFAULT_TABLE)
self._cls_table = kwargs.pop('table_class', self.table_class)
self._cls_storage_proxy = kwargs.pop('storage_proxy_class',
self.storage_proxy_class)
# Prepare the storage
#: :type: Storage
self._storage = storage(*args, **kwargs)
self._opened = True
# Prepare the default table
self._table_cache = {}
self._table = self.table(default_table)
def __repr__(self):
args = [
'tables={}'.format(list(self.tables())),
'tables_count={}'.format(len(self.tables())),
'default_table_documents_count={}'.format(self.__len__()),
'all_tables_documents_count={}'.format(
['{}={}'.format(table, len(self.table(table)))
for table in self.tables()]),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def table(self, name=DEFAULT_TABLE, **options):
"""
Get access to a specific table.
Creates a new table, if it hasn't been created before, otherwise it
returns the cached :class:`~tinydb.Table` object.
:param name: The name of the table.
:type name: str
:param cache_size: How many query results to cache.
:param table_class: Which table class to use.
"""
if name in self._table_cache:
return self._table_cache[name]
table_class = options.pop('table_class', self._cls_table)
table = table_class(self._cls_storage_proxy(self._storage, name), name, **options)
self._table_cache[name] = table
return table
def tables(self):
"""
Get the names of all tables in the database.
:returns: a set of table names
:rtype: set[str]
"""
return set(self._storage.read())
def purge_tables(self):
"""
Purge all tables from the database. **CANNOT BE REVERSED!**
"""
self._storage.write({})
self._table_cache.clear()
@property
def storage(self):
"""
Access the storage used for this TinyDB instance.
:return: This instance's storage
"""
return self._storage
def close(self):
"""
Close the database.
"""
self._opened = False
self._storage.close()
def __enter__(self):
return self
def __exit__(self, *args):
if self._opened:
self.close()
def __getattr__(self, name):
"""
Forward all unknown attribute calls to the underlying standard table.
"""
return getattr(self._table, name)
# Methods that are executed on the default table
# Because magic methods are not handled by __getattr__ we need to forward
# them manually here
def __len__(self):
"""
Get the total number of documents in the default table.
>>> db = TinyDB('db.json')
>>> len(db)
0
"""
return len(self._table)
def __iter__(self):
"""
Iter over all documents from default table.
"""
return self._table.__iter__()
|
msiemens/tinydb
|
tinydb/database.py
|
Table.process_elements
|
python
|
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
|
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L327-L376
|
[
"def _get_doc_ids(doc_ids, eids):\n # Backwards-compatibility shim\n if eids is not None:\n if doc_ids is not None:\n raise TypeError('cannot pass both eids and doc_ids')\n\n warnings.warn('eids has been renamed to doc_ids', DeprecationWarning)\n return eids\n else:\n return doc_ids\n",
"def _read(self):\n \"\"\"\n Reading access to the DB.\n\n :returns: all values\n :rtype: DataProxy\n \"\"\"\n\n return self._storage.read()\n",
"def _write(self, values):\n \"\"\"\n Writing access to the DB.\n\n :param values: the new values to write\n :type values: DataProxy | dict\n \"\"\"\n\n self.clear_cache()\n self._storage.write(values)\n",
"lambda data, doc_id: data.pop(doc_id),\n",
"lambda data, doc_id: fields(data[doc_id]),\n",
"lambda data, doc_id: data[doc_id].update(fields),\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert(self, document):
"""
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
"""
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
def insert_multiple(self, documents):
"""
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
"""
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
def remove(self, cond=None, doc_ids=None, eids=None):
"""
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
def get(self, cond=None, doc_id=None, eid=None):
"""
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
"""
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
def contains(self, cond=None, doc_ids=None, eids=None):
"""
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.insert
|
python
|
def insert(self, document):
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
|
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L449-L462
|
[
"def _get_doc_id(self, document):\n if not isinstance(document, Mapping):\n raise ValueError('Document is not a Mapping')\n return self._get_next_id()\n",
"def _read(self):\n \"\"\"\n Reading access to the DB.\n\n :returns: all values\n :rtype: DataProxy\n \"\"\"\n\n return self._storage.read()\n",
"def _write(self, values):\n \"\"\"\n Writing access to the DB.\n\n :param values: the new values to write\n :type values: DataProxy | dict\n \"\"\"\n\n self.clear_cache()\n self._storage.write(values)\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
"""
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
"""
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert_multiple(self, documents):
"""
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
"""
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
def remove(self, cond=None, doc_ids=None, eids=None):
"""
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
def get(self, cond=None, doc_id=None, eid=None):
"""
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
"""
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
def contains(self, cond=None, doc_ids=None, eids=None):
"""
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.insert_multiple
|
python
|
def insert_multiple(self, documents):
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
|
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L464-L483
|
[
"def _get_doc_id(self, document):\n if not isinstance(document, Mapping):\n raise ValueError('Document is not a Mapping')\n return self._get_next_id()\n",
"def _read(self):\n \"\"\"\n Reading access to the DB.\n\n :returns: all values\n :rtype: DataProxy\n \"\"\"\n\n return self._storage.read()\n",
"def _write(self, values):\n \"\"\"\n Writing access to the DB.\n\n :param values: the new values to write\n :type values: DataProxy | dict\n \"\"\"\n\n self.clear_cache()\n self._storage.write(values)\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
"""
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
"""
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert(self, document):
"""
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
"""
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
def remove(self, cond=None, doc_ids=None, eids=None):
"""
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
def get(self, cond=None, doc_id=None, eid=None):
"""
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
"""
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
def contains(self, cond=None, doc_ids=None, eids=None):
"""
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.remove
|
python
|
def remove(self, cond=None, doc_ids=None, eids=None):
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
|
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L485-L503
|
[
"def _get_doc_ids(doc_ids, eids):\n # Backwards-compatibility shim\n if eids is not None:\n if doc_ids is not None:\n raise TypeError('cannot pass both eids and doc_ids')\n\n warnings.warn('eids has been renamed to doc_ids', DeprecationWarning)\n return eids\n else:\n return doc_ids\n",
"def process_elements(self, func, cond=None, doc_ids=None, eids=None):\n \"\"\"\n Helper function for processing all documents specified by condition\n or IDs.\n\n A repeating pattern in TinyDB is to run some code on all documents\n that match a condition or are specified by their ID. This is\n implemented in this function.\n The function passed as ``func`` has to be a callable. Its first\n argument will be the data currently in the database. Its second\n argument is the document ID of the currently processed document.\n\n See: :meth:`~.update`, :meth:`.remove`\n\n :param func: the function to execute on every included document.\n first argument: all data\n second argument: the current eid\n :param cond: query that matches documents to use, or\n :param doc_ids: list of document IDs to use\n :param eids: list of document IDs to use (deprecated)\n :returns: the document IDs that were affected during processing\n \"\"\"\n\n doc_ids = _get_doc_ids(doc_ids, eids)\n data = self._read()\n\n if doc_ids is not None:\n # Processed document specified by id\n for doc_id in doc_ids:\n func(data, doc_id)\n\n elif cond is not None:\n # Collect affected doc_ids\n doc_ids = []\n\n # Processed documents specified by condition\n for doc_id in list(data):\n if cond(data[doc_id]):\n func(data, doc_id)\n doc_ids.append(doc_id)\n else:\n # Processed documents\n doc_ids = list(data)\n\n for doc_id in doc_ids:\n func(data, doc_id)\n\n self._write(data)\n\n return doc_ids\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
"""
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
"""
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert(self, document):
"""
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
"""
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
def insert_multiple(self, documents):
"""
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
"""
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
def get(self, cond=None, doc_id=None, eid=None):
"""
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
"""
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
def contains(self, cond=None, doc_ids=None, eids=None):
"""
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.update
|
python
|
def update(self, fields, cond=None, doc_ids=None, eids=None):
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
|
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L505-L529
|
[
"def _get_doc_ids(doc_ids, eids):\n # Backwards-compatibility shim\n if eids is not None:\n if doc_ids is not None:\n raise TypeError('cannot pass both eids and doc_ids')\n\n warnings.warn('eids has been renamed to doc_ids', DeprecationWarning)\n return eids\n else:\n return doc_ids\n",
"def process_elements(self, func, cond=None, doc_ids=None, eids=None):\n \"\"\"\n Helper function for processing all documents specified by condition\n or IDs.\n\n A repeating pattern in TinyDB is to run some code on all documents\n that match a condition or are specified by their ID. This is\n implemented in this function.\n The function passed as ``func`` has to be a callable. Its first\n argument will be the data currently in the database. Its second\n argument is the document ID of the currently processed document.\n\n See: :meth:`~.update`, :meth:`.remove`\n\n :param func: the function to execute on every included document.\n first argument: all data\n second argument: the current eid\n :param cond: query that matches documents to use, or\n :param doc_ids: list of document IDs to use\n :param eids: list of document IDs to use (deprecated)\n :returns: the document IDs that were affected during processing\n \"\"\"\n\n doc_ids = _get_doc_ids(doc_ids, eids)\n data = self._read()\n\n if doc_ids is not None:\n # Processed document specified by id\n for doc_id in doc_ids:\n func(data, doc_id)\n\n elif cond is not None:\n # Collect affected doc_ids\n doc_ids = []\n\n # Processed documents specified by condition\n for doc_id in list(data):\n if cond(data[doc_id]):\n func(data, doc_id)\n doc_ids.append(doc_id)\n else:\n # Processed documents\n doc_ids = list(data)\n\n for doc_id in doc_ids:\n func(data, doc_id)\n\n self._write(data)\n\n return doc_ids\n"
] |
class Table(object):
    """
    Represents a single TinyDB table.

    Wraps a ``StorageProxy`` with a CRUD interface (insert / search /
    update / remove) and keeps an LRU cache of recent query results.
    """
    def __init__(self, storage, name, cache_size=10):
        """
        Get access to a table.

        :param storage: Access to the storage
        :type storage: StorageProxy
        :param name: The table name
        :param cache_size: Maximum size of query cache.
        """
        self._storage = storage
        self._name = name
        self._query_cache = LRUCache(capacity=cache_size)
        data = self._read()
        self._init_last_id(data)
    def __repr__(self):
        args = [
            'name={!r}'.format(self.name),
            'total={}'.format(self.__len__()),
            'storage={}'.format(self._storage),
        ]
        return '<{} {}>'.format(type(self).__name__, ', '.join(args))
    def _init_last_id(self, data):
        # The next insert continues after the highest existing ID;
        # 0 marks an empty table.
        if data:
            self._last_id = max(i for i in data)
        else:
            self._last_id = 0
    @property
    def name(self):
        """
        Get the table name.
        """
        return self._name
    def process_elements(self, func, cond=None, doc_ids=None, eids=None):
        """
        Helper function for processing all documents specified by condition
        or IDs.

        A repeating pattern in TinyDB is to run some code on all documents
        that match a condition or are specified by their ID. This is
        implemented in this function.
        The function passed as ``func`` has to be a callable. Its first
        argument will be the data currently in the database. Its second
        argument is the document ID of the currently processed document.

        See: :meth:`~.update`, :meth:`.remove`

        :param func: the function to execute on every included document.
                     first argument: all data
                     second argument: the current doc_id
        :param cond: query that matches documents to use, or
        :param doc_ids: list of document IDs to use
        :param eids: list of document IDs to use (deprecated)
        :returns: the document IDs that were affected during processing
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        data = self._read()
        if doc_ids is not None:
            # Process documents specified by ID.
            for doc_id in doc_ids:
                func(data, doc_id)
        elif cond is not None:
            # Process documents matching the condition and collect the
            # affected IDs.
            doc_ids = []
            for doc_id in list(data):
                if cond(data[doc_id]):
                    func(data, doc_id)
                    doc_ids.append(doc_id)
        else:
            # Neither IDs nor condition given: process every document.
            doc_ids = list(data)
            for doc_id in doc_ids:
                func(data, doc_id)
        self._write(data)
        return doc_ids
    def clear_cache(self):
        """
        Clear the query cache.

        A simple helper that clears the internal query cache.
        """
        self._query_cache.clear()
    def _get_next_id(self):
        """
        Increment the ID used the last time and return it
        """
        current_id = self._last_id + 1
        self._last_id = current_id
        return current_id
    def _get_doc_id(self, document):
        # Reject non-mapping documents early; every stored document must
        # behave like a dict.
        if not isinstance(document, Mapping):
            raise ValueError('Document is not a Mapping')
        return self._get_next_id()
    def _read(self):
        """
        Reading access to the DB.

        :returns: all values
        :rtype: DataProxy
        """
        return self._storage.read()
    def _write(self, values):
        """
        Writing access to the DB.

        :param values: the new values to write
        :type values: DataProxy | dict
        """
        # Any write invalidates previously cached query results.
        self.clear_cache()
        self._storage.write(values)
    def __len__(self):
        """
        Get the total number of documents in the table.
        """
        return len(self._read())
    def all(self):
        """
        Get all documents stored in the table.

        :returns: a list with all documents.
        :rtype: list[Element]
        """
        return list(itervalues(self._read()))
    def __iter__(self):
        """
        Iterate over all documents stored in the table.

        :returns: an iterator over all documents.
        :rtype: listiterator[Element]
        """
        for value in itervalues(self._read()):
            yield value
    def insert(self, document):
        """
        Insert a new document into the table.

        :param document: the document to insert
        :returns: the inserted document's ID
        """
        doc_id = self._get_doc_id(document)
        data = self._read()
        data[doc_id] = dict(document)
        self._write(data)
        return doc_id
    def insert_multiple(self, documents):
        """
        Insert multiple documents into the table.

        :param documents: a list of documents to insert
        :returns: a list containing the inserted documents' IDs
        """
        doc_ids = []
        data = self._read()
        for doc in documents:
            doc_id = self._get_doc_id(doc)
            doc_ids.append(doc_id)
            data[doc_id] = dict(doc)
        self._write(data)
        return doc_ids
    def remove(self, cond=None, doc_ids=None, eids=None):
        """
        Remove all matching documents.

        :param cond: the condition to check against
        :type cond: query
        :param doc_ids: a list of document IDs
        :type doc_ids: list
        :returns: a list containing the removed documents' IDs
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if cond is None and doc_ids is None:
            raise RuntimeError('Use purge() to remove all documents')
        return self.process_elements(
            lambda data, doc_id: data.pop(doc_id),
            cond, doc_ids
        )
    def update(self, fields, cond=None, doc_ids=None, eids=None):
        """
        Update all matching documents to have a given set of fields.

        NOTE(review): this method was missing from this copy while
        :meth:`upsert` below calls ``self.update`` — restored to avoid an
        ``AttributeError`` at runtime.

        :param fields: the fields that the matching documents will have
                       or a method that will update the documents
        :type fields: dict | dict -> None
        :param cond: which documents to update
        :type cond: query
        :param doc_ids: a list of document IDs
        :type doc_ids: list
        :returns: a list containing the updated documents' IDs
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if callable(fields):
            # ``fields`` is a callable operating on a single document.
            return self.process_elements(
                lambda data, doc_id: fields(data[doc_id]),
                cond, doc_ids
            )
        else:
            # ``fields`` is a dict merged into each matching document.
            return self.process_elements(
                lambda data, doc_id: data[doc_id].update(fields),
                cond, doc_ids
            )
    def write_back(self, documents, doc_ids=None, eids=None):
        """
        Write back documents by doc_id.

        :param documents: a list of documents to write back
        :param doc_ids: a list of document IDs which need to be written back
        :returns: a list of document IDs that have been written
        :raises ValueError: if ``documents`` and ``doc_ids`` differ in length
        :raises IndexError: if a doc_id exceeds the highest ID ever used
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if doc_ids is not None and not len(documents) == len(doc_ids):
            raise ValueError(
                'The length of documents and doc_ids does not match.')
        if doc_ids is None:
            doc_ids = [doc.doc_id for doc in documents]
        # Since this function will write docs back like inserting, to ensure
        # here only process existing or removed instead of inserting new,
        # raise error if doc_id exceeded the last.
        if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
            raise IndexError(
                'ID exceeds table length, use existing or removed doc_id.')
        data = self._read()
        # Pair each ID with its document; zip keeps the original order
        # without mutating the caller's list (the previous implementation
        # reversed and emptied ``documents`` in place).
        for doc_id, doc in zip(doc_ids, documents):
            data[doc_id] = dict(doc)
        self._write(data)
        return doc_ids
    def upsert(self, document, cond):
        """
        Update a document, if it exists - insert it otherwise.

        Note: this will update *all* documents matching the query.

        :param document: the document to insert or the fields to update
        :param cond: which document to look for
        :returns: a list containing the updated document's ID
        """
        updated_docs = self.update(document, cond)
        if updated_docs:
            return updated_docs
        else:
            return [self.insert(document)]
    def purge(self):
        """
        Purge the table by removing all documents.
        """
        self._write({})
        self._last_id = 0
    def search(self, cond):
        """
        Search for all documents matching a 'where' cond.

        :param cond: the condition to check against
        :type cond: Query
        :returns: list of matching documents
        :rtype: list[Element]
        """
        if cond in self._query_cache:
            # Return a shallow copy so callers cannot mutate the cache.
            return self._query_cache.get(cond, [])[:]
        docs = [doc for doc in self.all() if cond(doc)]
        self._query_cache[cond] = docs
        return docs[:]
    def get(self, cond=None, doc_id=None, eid=None):
        """
        Get exactly one document specified by a query or an ID.

        Returns ``None`` if the document doesn't exist.

        :param cond: the condition to check against
        :type cond: Query
        :param doc_id: the document's ID
        :returns: the document or None
        :rtype: Element | None
        """
        doc_id = _get_doc_id(doc_id, eid)
        # Cannot use process_elements here because we want to return a
        # specific document.
        if doc_id is not None:
            # Document specified by ID.
            return self._read().get(doc_id, None)
        # Document specified by condition: return the first match.
        for doc in self.all():
            if cond(doc):
                return doc
    def count(self, cond):
        """
        Count the documents matching a condition.

        :param cond: the condition to use
        :type cond: Query
        """
        return len(self.search(cond))
    def contains(self, cond=None, doc_ids=None, eids=None):
        """
        Check whether the database contains a document matching a condition
        or an ID.

        If ``doc_ids`` is set, it checks if the db contains a document with
        one of the specified IDs.

        :param cond: the condition to use
        :type cond: Query
        :param doc_ids: the document IDs to look for
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if doc_ids is not None:
            # Documents specified by ID.
            return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
        # Document specified by condition.
        return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.write_back
|
python
|
def write_back(self, documents, doc_ids=None, eids=None):
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
|
Write back documents by doc_id
:param documents: a list of documents to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L531-L564
|
[
"def _get_doc_ids(doc_ids, eids):\n # Backwards-compatibility shim\n if eids is not None:\n if doc_ids is not None:\n raise TypeError('cannot pass both eids and doc_ids')\n\n warnings.warn('eids has been renamed to doc_ids', DeprecationWarning)\n return eids\n else:\n return doc_ids\n",
"def _read(self):\n \"\"\"\n Reading access to the DB.\n\n :returns: all values\n :rtype: DataProxy\n \"\"\"\n\n return self._storage.read()\n",
"def _write(self, values):\n \"\"\"\n Writing access to the DB.\n\n :param values: the new values to write\n :type values: DataProxy | dict\n \"\"\"\n\n self.clear_cache()\n self._storage.write(values)\n"
] |
class Table(object):
    """
    Represents a single TinyDB table.

    Wraps a ``StorageProxy`` with a CRUD interface (insert / search /
    update / remove) and keeps an LRU cache of recent query results.
    """
    def __init__(self, storage, name, cache_size=10):
        """
        Get access to a table.

        :param storage: Access to the storage
        :type storage: StorageProxy
        :param name: The table name
        :param cache_size: Maximum size of query cache.
        """
        self._storage = storage
        self._name = name
        self._query_cache = LRUCache(capacity=cache_size)
        data = self._read()
        self._init_last_id(data)
    def __repr__(self):
        args = [
            'name={!r}'.format(self.name),
            'total={}'.format(self.__len__()),
            'storage={}'.format(self._storage),
        ]
        return '<{} {}>'.format(type(self).__name__, ', '.join(args))
    def _init_last_id(self, data):
        # The next insert continues after the highest existing ID;
        # 0 marks an empty table.
        if data:
            self._last_id = max(i for i in data)
        else:
            self._last_id = 0
    @property
    def name(self):
        """
        Get the table name.
        """
        return self._name
    def process_elements(self, func, cond=None, doc_ids=None, eids=None):
        """
        Helper function for processing all documents specified by condition
        or IDs.

        A repeating pattern in TinyDB is to run some code on all documents
        that match a condition or are specified by their ID. This is
        implemented in this function.
        The function passed as ``func`` has to be a callable. Its first
        argument will be the data currently in the database. Its second
        argument is the document ID of the currently processed document.

        See: :meth:`~.update`, :meth:`.remove`

        :param func: the function to execute on every included document.
                     first argument: all data
                     second argument: the current doc_id
        :param cond: query that matches documents to use, or
        :param doc_ids: list of document IDs to use
        :param eids: list of document IDs to use (deprecated)
        :returns: the document IDs that were affected during processing
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        data = self._read()
        if doc_ids is not None:
            # Process documents specified by ID.
            for doc_id in doc_ids:
                func(data, doc_id)
        elif cond is not None:
            # Process documents matching the condition and collect the
            # affected IDs.
            doc_ids = []
            for doc_id in list(data):
                if cond(data[doc_id]):
                    func(data, doc_id)
                    doc_ids.append(doc_id)
        else:
            # Neither IDs nor condition given: process every document.
            doc_ids = list(data)
            for doc_id in doc_ids:
                func(data, doc_id)
        self._write(data)
        return doc_ids
    def clear_cache(self):
        """
        Clear the query cache.

        A simple helper that clears the internal query cache.
        """
        self._query_cache.clear()
    def _get_next_id(self):
        """
        Increment the ID used the last time and return it
        """
        current_id = self._last_id + 1
        self._last_id = current_id
        return current_id
    def _get_doc_id(self, document):
        # Reject non-mapping documents early; every stored document must
        # behave like a dict.
        if not isinstance(document, Mapping):
            raise ValueError('Document is not a Mapping')
        return self._get_next_id()
    def _read(self):
        """
        Reading access to the DB.

        :returns: all values
        :rtype: DataProxy
        """
        return self._storage.read()
    def _write(self, values):
        """
        Writing access to the DB.

        :param values: the new values to write
        :type values: DataProxy | dict
        """
        # Any write invalidates previously cached query results.
        self.clear_cache()
        self._storage.write(values)
    def __len__(self):
        """
        Get the total number of documents in the table.
        """
        return len(self._read())
    def all(self):
        """
        Get all documents stored in the table.

        :returns: a list with all documents.
        :rtype: list[Element]
        """
        return list(itervalues(self._read()))
    def __iter__(self):
        """
        Iterate over all documents stored in the table.

        :returns: an iterator over all documents.
        :rtype: listiterator[Element]
        """
        for value in itervalues(self._read()):
            yield value
    def insert(self, document):
        """
        Insert a new document into the table.

        :param document: the document to insert
        :returns: the inserted document's ID
        """
        doc_id = self._get_doc_id(document)
        data = self._read()
        data[doc_id] = dict(document)
        self._write(data)
        return doc_id
    def insert_multiple(self, documents):
        """
        Insert multiple documents into the table.

        :param documents: a list of documents to insert
        :returns: a list containing the inserted documents' IDs
        """
        doc_ids = []
        data = self._read()
        for doc in documents:
            doc_id = self._get_doc_id(doc)
            doc_ids.append(doc_id)
            data[doc_id] = dict(doc)
        self._write(data)
        return doc_ids
    def remove(self, cond=None, doc_ids=None, eids=None):
        """
        Remove all matching documents.

        :param cond: the condition to check against
        :type cond: query
        :param doc_ids: a list of document IDs
        :type doc_ids: list
        :returns: a list containing the removed documents' IDs
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if cond is None and doc_ids is None:
            raise RuntimeError('Use purge() to remove all documents')
        return self.process_elements(
            lambda data, doc_id: data.pop(doc_id),
            cond, doc_ids
        )
    def update(self, fields, cond=None, doc_ids=None, eids=None):
        """
        Update all matching documents to have a given set of fields.

        :param fields: the fields that the matching documents will have
                       or a method that will update the documents
        :type fields: dict | dict -> None
        :param cond: which documents to update
        :type cond: query
        :param doc_ids: a list of document IDs
        :type doc_ids: list
        :returns: a list containing the updated documents' IDs
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if callable(fields):
            # ``fields`` is a callable operating on a single document.
            return self.process_elements(
                lambda data, doc_id: fields(data[doc_id]),
                cond, doc_ids
            )
        else:
            # ``fields`` is a dict merged into each matching document.
            return self.process_elements(
                lambda data, doc_id: data[doc_id].update(fields),
                cond, doc_ids
            )
    def write_back(self, documents, doc_ids=None, eids=None):
        """
        Write back documents by doc_id.

        NOTE(review): restored for consistency with the sibling copies of
        this class in the file.

        :param documents: a list of documents to write back
        :param doc_ids: a list of document IDs which need to be written back
        :returns: a list of document IDs that have been written
        :raises ValueError: if ``documents`` and ``doc_ids`` differ in length
        :raises IndexError: if a doc_id exceeds the highest ID ever used
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if doc_ids is not None and not len(documents) == len(doc_ids):
            raise ValueError(
                'The length of documents and doc_ids does not match.')
        if doc_ids is None:
            doc_ids = [doc.doc_id for doc in documents]
        # Since this function will write docs back like inserting, to ensure
        # here only process existing or removed instead of inserting new,
        # raise error if doc_id exceeded the last.
        if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
            raise IndexError(
                'ID exceeds table length, use existing or removed doc_id.')
        data = self._read()
        # Pair each ID with its document; zip keeps the original order
        # without mutating the caller's list.
        for doc_id, doc in zip(doc_ids, documents):
            data[doc_id] = dict(doc)
        self._write(data)
        return doc_ids
    def upsert(self, document, cond):
        """
        Update a document, if it exists - insert it otherwise.

        Note: this will update *all* documents matching the query.

        :param document: the document to insert or the fields to update
        :param cond: which document to look for
        :returns: a list containing the updated document's ID
        """
        updated_docs = self.update(document, cond)
        if updated_docs:
            return updated_docs
        else:
            return [self.insert(document)]
    def purge(self):
        """
        Purge the table by removing all documents.
        """
        self._write({})
        self._last_id = 0
    def search(self, cond):
        """
        Search for all documents matching a 'where' cond.

        :param cond: the condition to check against
        :type cond: Query
        :returns: list of matching documents
        :rtype: list[Element]
        """
        if cond in self._query_cache:
            # Return a shallow copy so callers cannot mutate the cache.
            return self._query_cache.get(cond, [])[:]
        docs = [doc for doc in self.all() if cond(doc)]
        self._query_cache[cond] = docs
        return docs[:]
    def get(self, cond=None, doc_id=None, eid=None):
        """
        Get exactly one document specified by a query or an ID.

        Returns ``None`` if the document doesn't exist.

        :param cond: the condition to check against
        :type cond: Query
        :param doc_id: the document's ID
        :returns: the document or None
        :rtype: Element | None
        """
        doc_id = _get_doc_id(doc_id, eid)
        # Cannot use process_elements here because we want to return a
        # specific document.
        if doc_id is not None:
            # Document specified by ID.
            return self._read().get(doc_id, None)
        # Document specified by condition: return the first match.
        for doc in self.all():
            if cond(doc):
                return doc
    def count(self, cond):
        """
        Count the documents matching a condition.

        :param cond: the condition to use
        :type cond: Query
        """
        return len(self.search(cond))
    def contains(self, cond=None, doc_ids=None, eids=None):
        """
        Check whether the database contains a document matching a condition
        or an ID.

        If ``doc_ids`` is set, it checks if the db contains a document with
        one of the specified IDs.

        :param cond: the condition to use
        :type cond: Query
        :param doc_ids: the document IDs to look for
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if doc_ids is not None:
            # Documents specified by ID.
            return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
        # Document specified by condition.
        return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.upsert
|
python
|
def upsert(self, document, cond):
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
|
Update a document, if it exists - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L566-L581
|
[
"def insert(self, document):\n \"\"\"\n Insert a new document into the table.\n\n :param document: the document to insert\n :returns: the inserted document's ID\n \"\"\"\n\n doc_id = self._get_doc_id(document)\n data = self._read()\n data[doc_id] = dict(document)\n self._write(data)\n\n return doc_id\n",
"def update(self, fields, cond=None, doc_ids=None, eids=None):\n \"\"\"\n Update all matching documents to have a given set of fields.\n\n :param fields: the fields that the matching documents will have\n or a method that will update the documents\n :type fields: dict | dict -> None\n :param cond: which documents to update\n :type cond: query\n :param doc_ids: a list of document IDs\n :type doc_ids: list\n :returns: a list containing the updated document's ID\n \"\"\"\n doc_ids = _get_doc_ids(doc_ids, eids)\n\n if callable(fields):\n return self.process_elements(\n lambda data, doc_id: fields(data[doc_id]),\n cond, doc_ids\n )\n else:\n return self.process_elements(\n lambda data, doc_id: data[doc_id].update(fields),\n cond, doc_ids\n )\n"
] |
class Table(object):
    """
    Represents a single TinyDB table.

    Wraps a ``StorageProxy`` with a CRUD interface (insert / search /
    update / remove) and keeps an LRU cache of recent query results.
    """
    def __init__(self, storage, name, cache_size=10):
        """
        Get access to a table.

        :param storage: Access to the storage
        :type storage: StorageProxy
        :param name: The table name
        :param cache_size: Maximum size of query cache.
        """
        self._storage = storage
        self._name = name
        self._query_cache = LRUCache(capacity=cache_size)
        data = self._read()
        self._init_last_id(data)
    def __repr__(self):
        args = [
            'name={!r}'.format(self.name),
            'total={}'.format(self.__len__()),
            'storage={}'.format(self._storage),
        ]
        return '<{} {}>'.format(type(self).__name__, ', '.join(args))
    def _init_last_id(self, data):
        # The next insert continues after the highest existing ID;
        # 0 marks an empty table.
        if data:
            self._last_id = max(i for i in data)
        else:
            self._last_id = 0
    @property
    def name(self):
        """
        Get the table name.
        """
        return self._name
    def process_elements(self, func, cond=None, doc_ids=None, eids=None):
        """
        Helper function for processing all documents specified by condition
        or IDs.

        A repeating pattern in TinyDB is to run some code on all documents
        that match a condition or are specified by their ID. This is
        implemented in this function.
        The function passed as ``func`` has to be a callable. Its first
        argument will be the data currently in the database. Its second
        argument is the document ID of the currently processed document.

        See: :meth:`~.update`, :meth:`.remove`

        :param func: the function to execute on every included document.
                     first argument: all data
                     second argument: the current doc_id
        :param cond: query that matches documents to use, or
        :param doc_ids: list of document IDs to use
        :param eids: list of document IDs to use (deprecated)
        :returns: the document IDs that were affected during processing
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        data = self._read()
        if doc_ids is not None:
            # Process documents specified by ID.
            for doc_id in doc_ids:
                func(data, doc_id)
        elif cond is not None:
            # Process documents matching the condition and collect the
            # affected IDs.
            doc_ids = []
            for doc_id in list(data):
                if cond(data[doc_id]):
                    func(data, doc_id)
                    doc_ids.append(doc_id)
        else:
            # Neither IDs nor condition given: process every document.
            doc_ids = list(data)
            for doc_id in doc_ids:
                func(data, doc_id)
        self._write(data)
        return doc_ids
    def clear_cache(self):
        """
        Clear the query cache.

        A simple helper that clears the internal query cache.
        """
        self._query_cache.clear()
    def _get_next_id(self):
        """
        Increment the ID used the last time and return it
        """
        current_id = self._last_id + 1
        self._last_id = current_id
        return current_id
    def _get_doc_id(self, document):
        # Reject non-mapping documents early; every stored document must
        # behave like a dict.
        if not isinstance(document, Mapping):
            raise ValueError('Document is not a Mapping')
        return self._get_next_id()
    def _read(self):
        """
        Reading access to the DB.

        :returns: all values
        :rtype: DataProxy
        """
        return self._storage.read()
    def _write(self, values):
        """
        Writing access to the DB.

        :param values: the new values to write
        :type values: DataProxy | dict
        """
        # Any write invalidates previously cached query results.
        self.clear_cache()
        self._storage.write(values)
    def __len__(self):
        """
        Get the total number of documents in the table.
        """
        return len(self._read())
    def all(self):
        """
        Get all documents stored in the table.

        :returns: a list with all documents.
        :rtype: list[Element]
        """
        return list(itervalues(self._read()))
    def __iter__(self):
        """
        Iterate over all documents stored in the table.

        :returns: an iterator over all documents.
        :rtype: listiterator[Element]
        """
        for value in itervalues(self._read()):
            yield value
    def insert(self, document):
        """
        Insert a new document into the table.

        :param document: the document to insert
        :returns: the inserted document's ID
        """
        doc_id = self._get_doc_id(document)
        data = self._read()
        data[doc_id] = dict(document)
        self._write(data)
        return doc_id
    def insert_multiple(self, documents):
        """
        Insert multiple documents into the table.

        :param documents: a list of documents to insert
        :returns: a list containing the inserted documents' IDs
        """
        doc_ids = []
        data = self._read()
        for doc in documents:
            doc_id = self._get_doc_id(doc)
            doc_ids.append(doc_id)
            data[doc_id] = dict(doc)
        self._write(data)
        return doc_ids
    def remove(self, cond=None, doc_ids=None, eids=None):
        """
        Remove all matching documents.

        :param cond: the condition to check against
        :type cond: query
        :param doc_ids: a list of document IDs
        :type doc_ids: list
        :returns: a list containing the removed documents' IDs
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if cond is None and doc_ids is None:
            raise RuntimeError('Use purge() to remove all documents')
        return self.process_elements(
            lambda data, doc_id: data.pop(doc_id),
            cond, doc_ids
        )
    def update(self, fields, cond=None, doc_ids=None, eids=None):
        """
        Update all matching documents to have a given set of fields.

        :param fields: the fields that the matching documents will have
                       or a method that will update the documents
        :type fields: dict | dict -> None
        :param cond: which documents to update
        :type cond: query
        :param doc_ids: a list of document IDs
        :type doc_ids: list
        :returns: a list containing the updated documents' IDs
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if callable(fields):
            # ``fields`` is a callable operating on a single document.
            return self.process_elements(
                lambda data, doc_id: fields(data[doc_id]),
                cond, doc_ids
            )
        else:
            # ``fields`` is a dict merged into each matching document.
            return self.process_elements(
                lambda data, doc_id: data[doc_id].update(fields),
                cond, doc_ids
            )
    def write_back(self, documents, doc_ids=None, eids=None):
        """
        Write back documents by doc_id.

        :param documents: a list of documents to write back
        :param doc_ids: a list of document IDs which need to be written back
        :returns: a list of document IDs that have been written
        :raises ValueError: if ``documents`` and ``doc_ids`` differ in length
        :raises IndexError: if a doc_id exceeds the highest ID ever used
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if doc_ids is not None and not len(documents) == len(doc_ids):
            raise ValueError(
                'The length of documents and doc_ids does not match.')
        if doc_ids is None:
            doc_ids = [doc.doc_id for doc in documents]
        # Since this function will write docs back like inserting, to ensure
        # here only process existing or removed instead of inserting new,
        # raise error if doc_id exceeded the last.
        if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
            raise IndexError(
                'ID exceeds table length, use existing or removed doc_id.')
        data = self._read()
        # Pair each ID with its document; zip keeps the original order
        # without mutating the caller's list (the previous implementation
        # reversed and emptied ``documents`` in place).
        for doc_id, doc in zip(doc_ids, documents):
            data[doc_id] = dict(doc)
        self._write(data)
        return doc_ids
    def upsert(self, document, cond):
        """
        Update a document, if it exists - insert it otherwise.

        NOTE(review): restored for consistency with the sibling copies of
        this class in the file.

        Note: this will update *all* documents matching the query.

        :param document: the document to insert or the fields to update
        :param cond: which document to look for
        :returns: a list containing the updated document's ID
        """
        updated_docs = self.update(document, cond)
        if updated_docs:
            return updated_docs
        else:
            return [self.insert(document)]
    def purge(self):
        """
        Purge the table by removing all documents.
        """
        self._write({})
        self._last_id = 0
    def search(self, cond):
        """
        Search for all documents matching a 'where' cond.

        :param cond: the condition to check against
        :type cond: Query
        :returns: list of matching documents
        :rtype: list[Element]
        """
        if cond in self._query_cache:
            # Return a shallow copy so callers cannot mutate the cache.
            return self._query_cache.get(cond, [])[:]
        docs = [doc for doc in self.all() if cond(doc)]
        self._query_cache[cond] = docs
        return docs[:]
    def get(self, cond=None, doc_id=None, eid=None):
        """
        Get exactly one document specified by a query or an ID.

        Returns ``None`` if the document doesn't exist.

        :param cond: the condition to check against
        :type cond: Query
        :param doc_id: the document's ID
        :returns: the document or None
        :rtype: Element | None
        """
        doc_id = _get_doc_id(doc_id, eid)
        # Cannot use process_elements here because we want to return a
        # specific document.
        if doc_id is not None:
            # Document specified by ID.
            return self._read().get(doc_id, None)
        # Document specified by condition: return the first match.
        for doc in self.all():
            if cond(doc):
                return doc
    def count(self, cond):
        """
        Count the documents matching a condition.

        :param cond: the condition to use
        :type cond: Query
        """
        return len(self.search(cond))
    def contains(self, cond=None, doc_ids=None, eids=None):
        """
        Check whether the database contains a document matching a condition
        or an ID.

        If ``doc_ids`` is set, it checks if the db contains a document with
        one of the specified IDs.

        :param cond: the condition to use
        :type cond: Query
        :param doc_ids: the document IDs to look for
        """
        doc_ids = _get_doc_ids(doc_ids, eids)
        if doc_ids is not None:
            # Documents specified by ID.
            return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
        # Document specified by condition.
        return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.search
|
python
|
def search(self, cond):
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
|
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L591-L608
|
[
"def all(self):\n \"\"\"\n Get all documents stored in the table.\n\n :returns: a list with all documents.\n :rtype: list[Element]\n \"\"\"\n\n return list(itervalues(self._read()))\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
"""
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
"""
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert(self, document):
"""
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
"""
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
def insert_multiple(self, documents):
"""
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
"""
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
def remove(self, cond=None, doc_ids=None, eids=None):
"""
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def get(self, cond=None, doc_id=None, eid=None):
"""
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
"""
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
def contains(self, cond=None, doc_ids=None, eids=None):
"""
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.get
|
python
|
def get(self, cond=None, doc_id=None, eid=None):
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
|
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L610-L636
|
[
"def _get_doc_id(doc_id, eid):\n # Backwards-compatibility shim\n if eid is not None:\n if doc_id is not None:\n raise TypeError('cannot pass both eid and doc_id')\n\n warnings.warn('eid has been renamed to doc_id', DeprecationWarning)\n return eid\n else:\n return doc_id\n",
"def _read(self):\n \"\"\"\n Reading access to the DB.\n\n :returns: all values\n :rtype: DataProxy\n \"\"\"\n\n return self._storage.read()\n",
"def all(self):\n \"\"\"\n Get all documents stored in the table.\n\n :returns: a list with all documents.\n :rtype: list[Element]\n \"\"\"\n\n return list(itervalues(self._read()))\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
"""
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
"""
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert(self, document):
"""
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
"""
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
def insert_multiple(self, documents):
"""
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
"""
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
def remove(self, cond=None, doc_ids=None, eids=None):
"""
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
def contains(self, cond=None, doc_ids=None, eids=None):
"""
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
msiemens/tinydb
|
tinydb/database.py
|
Table.contains
|
python
|
def contains(self, cond=None, doc_ids=None, eids=None):
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None:
# Documents specified by ID
return any(self.get(doc_id=doc_id) for doc_id in doc_ids)
# Document specified by condition
return self.get(cond) is not None
|
Check wether the database contains a document matching a condition or
an ID.
If ``eids`` is set, it checks if the db contains a document with one
of the specified.
:param cond: the condition use
:type cond: Query
:param doc_ids: the document IDs to look for
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/database.py#L648-L667
|
[
"def _get_doc_ids(doc_ids, eids):\n # Backwards-compatibility shim\n if eids is not None:\n if doc_ids is not None:\n raise TypeError('cannot pass both eids and doc_ids')\n\n warnings.warn('eids has been renamed to doc_ids', DeprecationWarning)\n return eids\n else:\n return doc_ids\n",
"def get(self, cond=None, doc_id=None, eid=None):\n \"\"\"\n Get exactly one document specified by a query or and ID.\n\n Returns ``None`` if the document doesn't exist\n\n :param cond: the condition to check against\n :type cond: Query\n\n :param doc_id: the document's ID\n\n :returns: the document or None\n :rtype: Element | None\n \"\"\"\n doc_id = _get_doc_id(doc_id, eid)\n\n # Cannot use process_elements here because we want to return a\n # specific document\n\n if doc_id is not None:\n # Document specified by ID\n return self._read().get(doc_id, None)\n\n # Document specified by condition\n for doc in self.all():\n if cond(doc):\n return doc\n"
] |
class Table(object):
"""
Represents a single TinyDB Table.
"""
def __init__(self, storage, name, cache_size=10):
"""
Get access to a table.
:param storage: Access to the storage
:type storage: StorageProxy
:param name: The table name
:param cache_size: Maximum size of query cache.
"""
self._storage = storage
self._name = name
self._query_cache = LRUCache(capacity=cache_size)
data = self._read()
self._init_last_id(data)
def __repr__(self):
args = [
'name={!r}'.format(self.name),
'total={}'.format(self.__len__()),
'storage={}'.format(self._storage),
]
return '<{} {}>'.format(type(self).__name__, ', '.join(args))
def _init_last_id(self, data):
if data:
self._last_id = max(i for i in data)
else:
self._last_id = 0
@property
def name(self):
"""
Get the table name.
"""
return self._name
def process_elements(self, func, cond=None, doc_ids=None, eids=None):
"""
Helper function for processing all documents specified by condition
or IDs.
A repeating pattern in TinyDB is to run some code on all documents
that match a condition or are specified by their ID. This is
implemented in this function.
The function passed as ``func`` has to be a callable. Its first
argument will be the data currently in the database. Its second
argument is the document ID of the currently processed document.
See: :meth:`~.update`, :meth:`.remove`
:param func: the function to execute on every included document.
first argument: all data
second argument: the current eid
:param cond: query that matches documents to use, or
:param doc_ids: list of document IDs to use
:param eids: list of document IDs to use (deprecated)
:returns: the document IDs that were affected during processing
"""
doc_ids = _get_doc_ids(doc_ids, eids)
data = self._read()
if doc_ids is not None:
# Processed document specified by id
for doc_id in doc_ids:
func(data, doc_id)
elif cond is not None:
# Collect affected doc_ids
doc_ids = []
# Processed documents specified by condition
for doc_id in list(data):
if cond(data[doc_id]):
func(data, doc_id)
doc_ids.append(doc_id)
else:
# Processed documents
doc_ids = list(data)
for doc_id in doc_ids:
func(data, doc_id)
self._write(data)
return doc_ids
def clear_cache(self):
"""
Clear the query cache.
A simple helper that clears the internal query cache.
"""
self._query_cache.clear()
def _get_next_id(self):
"""
Increment the ID used the last time and return it
"""
current_id = self._last_id + 1
self._last_id = current_id
return current_id
def _get_doc_id(self, document):
if not isinstance(document, Mapping):
raise ValueError('Document is not a Mapping')
return self._get_next_id()
def _read(self):
"""
Reading access to the DB.
:returns: all values
:rtype: DataProxy
"""
return self._storage.read()
def _write(self, values):
"""
Writing access to the DB.
:param values: the new values to write
:type values: DataProxy | dict
"""
self.clear_cache()
self._storage.write(values)
def __len__(self):
"""
Get the total number of documents in the table.
"""
return len(self._read())
def all(self):
"""
Get all documents stored in the table.
:returns: a list with all documents.
:rtype: list[Element]
"""
return list(itervalues(self._read()))
def __iter__(self):
"""
Iter over all documents stored in the table.
:returns: an iterator over all documents.
:rtype: listiterator[Element]
"""
for value in itervalues(self._read()):
yield value
def insert(self, document):
"""
Insert a new document into the table.
:param document: the document to insert
:returns: the inserted document's ID
"""
doc_id = self._get_doc_id(document)
data = self._read()
data[doc_id] = dict(document)
self._write(data)
return doc_id
def insert_multiple(self, documents):
"""
Insert multiple documents into the table.
:param documents: a list of documents to insert
:returns: a list containing the inserted documents' IDs
"""
doc_ids = []
data = self._read()
for doc in documents:
doc_id = self._get_doc_id(doc)
doc_ids.append(doc_id)
data[doc_id] = dict(doc)
self._write(data)
return doc_ids
def remove(self, cond=None, doc_ids=None, eids=None):
"""
Remove all matching documents.
:param cond: the condition to check against
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the removed document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if cond is None and doc_ids is None:
raise RuntimeError('Use purge() to remove all documents')
return self.process_elements(
lambda data, doc_id: data.pop(doc_id),
cond, doc_ids
)
def update(self, fields, cond=None, doc_ids=None, eids=None):
"""
Update all matching documents to have a given set of fields.
:param fields: the fields that the matching documents will have
or a method that will update the documents
:type fields: dict | dict -> None
:param cond: which documents to update
:type cond: query
:param doc_ids: a list of document IDs
:type doc_ids: list
:returns: a list containing the updated document's ID
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if callable(fields):
return self.process_elements(
lambda data, doc_id: fields(data[doc_id]),
cond, doc_ids
)
else:
return self.process_elements(
lambda data, doc_id: data[doc_id].update(fields),
cond, doc_ids
)
def write_back(self, documents, doc_ids=None, eids=None):
"""
Write back documents by doc_id
:param documents: a list of document to write back
:param doc_ids: a list of document IDs which need to be written back
:returns: a list of document IDs that have been written
"""
doc_ids = _get_doc_ids(doc_ids, eids)
if doc_ids is not None and not len(documents) == len(doc_ids):
raise ValueError(
'The length of documents and doc_ids is not match.')
if doc_ids is None:
doc_ids = [doc.doc_id for doc in documents]
# Since this function will write docs back like inserting, to ensure
# here only process existing or removed instead of inserting new,
# raise error if doc_id exceeded the last.
if len(doc_ids) > 0 and max(doc_ids) > self._last_id:
raise IndexError(
'ID exceeds table length, use existing or removed doc_id.')
data = self._read()
# Document specified by ID
documents.reverse()
for doc_id in doc_ids:
data[doc_id] = dict(documents.pop())
self._write(data)
return doc_ids
def upsert(self, document, cond):
"""
Update a document, if it exist - insert it otherwise.
Note: this will update *all* documents matching the query.
:param document: the document to insert or the fields to update
:param cond: which document to look for
:returns: a list containing the updated document's ID
"""
updated_docs = self.update(document, cond)
if updated_docs:
return updated_docs
else:
return [self.insert(document)]
def purge(self):
"""
Purge the table by removing all documents.
"""
self._write({})
self._last_id = 0
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:]
def get(self, cond=None, doc_id=None, eid=None):
"""
Get exactly one document specified by a query or and ID.
Returns ``None`` if the document doesn't exist
:param cond: the condition to check against
:type cond: Query
:param doc_id: the document's ID
:returns: the document or None
:rtype: Element | None
"""
doc_id = _get_doc_id(doc_id, eid)
# Cannot use process_elements here because we want to return a
# specific document
if doc_id is not None:
# Document specified by ID
return self._read().get(doc_id, None)
# Document specified by condition
for doc in self.all():
if cond(doc):
return doc
def count(self, cond):
"""
Count the documents matching a condition.
:param cond: the condition use
:type cond: Query
"""
return len(self.search(cond))
|
msiemens/tinydb
|
tinydb/middlewares.py
|
CachingMiddleware.flush
|
python
|
def flush(self):
if self._cache_modified_count > 0:
self.storage.write(self.cache)
self._cache_modified_count = 0
|
Flush all unwritten data to disk.
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/middlewares.py#L106-L112
| null |
class CachingMiddleware(Middleware):
"""
Add some caching to TinyDB.
This Middleware aims to improve the performance of TinyDB by writing only
the last DB state every :attr:`WRITE_CACHE_SIZE` time and reading always
from cache.
"""
#: The number of write operations to cache before writing to disc
WRITE_CACHE_SIZE = 1000
def __init__(self, storage_cls=TinyDB.DEFAULT_STORAGE):
super(CachingMiddleware, self).__init__(storage_cls)
self.cache = None
self._cache_modified_count = 0
def read(self):
if self.cache is None:
self.cache = self.storage.read()
return self.cache
def write(self, data):
self.cache = data
self._cache_modified_count += 1
if self._cache_modified_count >= self.WRITE_CACHE_SIZE:
self.flush()
def close(self):
self.flush() # Flush potentially unwritten data
self.storage.close()
|
msiemens/tinydb
|
tinydb/queries.py
|
Query.matches
|
python
|
def matches(self, regex, flags=0):
return self._generate_test(
lambda value: re.match(regex, value, flags),
('matches', self._path, regex)
)
|
Run a regex test against a dict value (whole string has to match).
>>> Query().f1.matches(r'^\w+$')
:param regex: The regular expression to use for matching
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/queries.py#L264-L275
|
[
"def _generate_test(self, test, hashval):\n \"\"\"\n Generate a query based on a test function.\n\n :param test: The test the query executes.\n :param hashval: The hash of the query.\n :return: A :class:`~tinydb.queries.QueryImpl` object\n \"\"\"\n if not self._path:\n raise ValueError('Query has no path')\n\n return QueryImpl(self._prepare_test(test), hashval)\n"
] |
class Query(QueryImpl):
"""
TinyDB Queries.
Allows to build queries for TinyDB databases. There are two main ways of
using queries:
1) ORM-like usage:
>>> User = Query()
>>> db.search(User.name == 'John Doe')
>>> db.search(User['logged-in'] == True)
2) Classical usage:
>>> db.search(where('value') == True)
Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
a more fluent syntax.
Besides the methods documented here you can combine queries using the
binary AND and OR operators:
>>> # Binary AND:
>>> db.search((where('field1').exists()) & (where('field2') == 5))
>>> # Binary OR:
>>> db.search((where('field1').exists()) | (where('field2') == 5))
Queries are executed by calling the resulting object. They expect to get
the document to test as the first argument and return ``True`` or
``False`` depending on whether the documents matches the query or not.
"""
def __init__(self):
self._path = ()
super(Query, self).__init__(
self._prepare_test(lambda _: True),
('path', self._path)
)
def __repr__(self):
return '{}()'.format(type(self).__name__)
def __hash__(self):
return super(Query, self).__hash__()
def __getattr__(self, item):
query = Query()
query._path = self._path + (item, )
query.hashval = ('path', query._path)
return query
__getitem__ = __getattr__
def _prepare_test(self, test):
def runner(value):
try:
# Resolve the path
for part in self._path:
value = value[part]
except (KeyError, TypeError):
return False
else:
return test(value)
return runner
def _generate_test(self, test, hashval):
"""
Generate a query based on a test function.
:param test: The test the query executes.
:param hashval: The hash of the query.
:return: A :class:`~tinydb.queries.QueryImpl` object
"""
if not self._path:
raise ValueError('Query has no path')
return QueryImpl(self._prepare_test(test), hashval)
def __eq__(self, rhs):
"""
Test a dict value for equality.
>>> Query().f1 == 42
:param rhs: The value to compare against
"""
if sys.version_info <= (3, 0): # pragma: no cover
# Special UTF-8 handling on Python 2
def test(value):
with catch_warning(UnicodeWarning):
try:
return value == rhs
except UnicodeWarning:
# Dealing with a case, where 'value' or 'rhs'
# is unicode and the other is a byte string.
if isinstance(value, str):
return value.decode('utf-8') == rhs
elif isinstance(rhs, str):
return value == rhs.decode('utf-8')
else: # pragma: no cover
def test(value):
return value == rhs
return self._generate_test(
lambda value: test(value),
('==', self._path, freeze(rhs))
)
def __ne__(self, rhs):
"""
Test a dict value for inequality.
>>> Query().f1 != 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value != rhs,
('!=', self._path, freeze(rhs))
)
def __lt__(self, rhs):
"""
Test a dict value for being lower than another value.
>>> Query().f1 < 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value < rhs,
('<', self._path, rhs)
)
def __le__(self, rhs):
"""
Test a dict value for being lower than or equal to another value.
>>> where('f1') <= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value <= rhs,
('<=', self._path, rhs)
)
def __gt__(self, rhs):
"""
Test a dict value for being greater than another value.
>>> Query().f1 > 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value > rhs,
('>', self._path, rhs)
)
def __ge__(self, rhs):
"""
Test a dict value for being greater than or equal to another value.
>>> Query().f1 >= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value >= rhs,
('>=', self._path, rhs)
)
def exists(self):
"""
Test for a dict where a provided key exists.
>>> Query().f1.exists()
"""
return self._generate_test(
lambda _: True,
('exists', self._path)
)
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
)
def test(self, func, *args):
"""
Run a user-defined test function against a dict value.
>>> def test_func(val):
... return val == 42
...
>>> Query().f1.test(test_func)
:param func: The function to call, passing the dict as the first
argument
:param args: Additional arguments to pass to the test function
"""
return self._generate_test(
lambda value: func(value, *args),
('test', self._path, func, args)
)
def any(self, cond):
"""
Check if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.any(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 0}]}
>>> Query().f1.any([1, 2, 3])
Matches::
{'f1': [1, 2]}
{'f1': [3, 4, 5]}
:param cond: Either a query that at least one document has to match or
a list of which at least one document has to be contained
in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and any(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and any(e in cond for e in value)
return self._generate_test(
lambda value: _cmp(value),
('any', self._path, freeze(cond))
)
def all(self, cond):
"""
Check if a condition is met by all documents in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.all(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 1}]}
>>> Query().f1.all([1, 2, 3])
Matches::
{'f1': [1, 2, 3, 4, 5]}
:param cond: Either a query that all documents have to match or a list
which has to be contained in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and all(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and all(e in value for e in cond)
return self._generate_test(
lambda value: _cmp(value),
('all', self._path, freeze(cond))
)
def one_of(self, items):
"""
Check if the value is contained in a list or generator.
>>> Query().f1.one_of(['value 1', 'value 2'])
:param items: The list of items to check with
"""
return self._generate_test(
lambda value: value in items,
('one_of', self._path, freeze(items))
)
|
msiemens/tinydb
|
tinydb/queries.py
|
Query.search
|
python
|
def search(self, regex, flags=0):
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
)
|
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/queries.py#L277-L289
|
[
"def _generate_test(self, test, hashval):\n \"\"\"\n Generate a query based on a test function.\n\n :param test: The test the query executes.\n :param hashval: The hash of the query.\n :return: A :class:`~tinydb.queries.QueryImpl` object\n \"\"\"\n if not self._path:\n raise ValueError('Query has no path')\n\n return QueryImpl(self._prepare_test(test), hashval)\n"
] |
class Query(QueryImpl):
"""
TinyDB Queries.
Allows to build queries for TinyDB databases. There are two main ways of
using queries:
1) ORM-like usage:
>>> User = Query()
>>> db.search(User.name == 'John Doe')
>>> db.search(User['logged-in'] == True)
2) Classical usage:
>>> db.search(where('value') == True)
Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
a more fluent syntax.
Besides the methods documented here you can combine queries using the
binary AND and OR operators:
>>> # Binary AND:
>>> db.search((where('field1').exists()) & (where('field2') == 5))
>>> # Binary OR:
>>> db.search((where('field1').exists()) | (where('field2') == 5))
Queries are executed by calling the resulting object. They expect to get
the document to test as the first argument and return ``True`` or
``False`` depending on whether the documents matches the query or not.
"""
def __init__(self):
self._path = ()
super(Query, self).__init__(
self._prepare_test(lambda _: True),
('path', self._path)
)
def __repr__(self):
return '{}()'.format(type(self).__name__)
def __hash__(self):
return super(Query, self).__hash__()
def __getattr__(self, item):
query = Query()
query._path = self._path + (item, )
query.hashval = ('path', query._path)
return query
__getitem__ = __getattr__
def _prepare_test(self, test):
def runner(value):
try:
# Resolve the path
for part in self._path:
value = value[part]
except (KeyError, TypeError):
return False
else:
return test(value)
return runner
def _generate_test(self, test, hashval):
"""
Generate a query based on a test function.
:param test: The test the query executes.
:param hashval: The hash of the query.
:return: A :class:`~tinydb.queries.QueryImpl` object
"""
if not self._path:
raise ValueError('Query has no path')
return QueryImpl(self._prepare_test(test), hashval)
def __eq__(self, rhs):
"""
Test a dict value for equality.
>>> Query().f1 == 42
:param rhs: The value to compare against
"""
if sys.version_info <= (3, 0): # pragma: no cover
# Special UTF-8 handling on Python 2
def test(value):
with catch_warning(UnicodeWarning):
try:
return value == rhs
except UnicodeWarning:
# Dealing with a case, where 'value' or 'rhs'
# is unicode and the other is a byte string.
if isinstance(value, str):
return value.decode('utf-8') == rhs
elif isinstance(rhs, str):
return value == rhs.decode('utf-8')
else: # pragma: no cover
def test(value):
return value == rhs
return self._generate_test(
lambda value: test(value),
('==', self._path, freeze(rhs))
)
def __ne__(self, rhs):
"""
Test a dict value for inequality.
>>> Query().f1 != 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value != rhs,
('!=', self._path, freeze(rhs))
)
def __lt__(self, rhs):
"""
Test a dict value for being lower than another value.
>>> Query().f1 < 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value < rhs,
('<', self._path, rhs)
)
def __le__(self, rhs):
"""
Test a dict value for being lower than or equal to another value.
>>> where('f1') <= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value <= rhs,
('<=', self._path, rhs)
)
def __gt__(self, rhs):
"""
Test a dict value for being greater than another value.
>>> Query().f1 > 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value > rhs,
('>', self._path, rhs)
)
def __ge__(self, rhs):
"""
Test a dict value for being greater than or equal to another value.
>>> Query().f1 >= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value >= rhs,
('>=', self._path, rhs)
)
def exists(self):
"""
Test for a dict where a provided key exists.
>>> Query().f1.exists()
"""
return self._generate_test(
lambda _: True,
('exists', self._path)
)
def matches(self, regex, flags=0):
"""
Run a regex test against a dict value (whole string has to match).
>>> Query().f1.matches(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.match(regex, value, flags),
('matches', self._path, regex)
)
def test(self, func, *args):
"""
Run a user-defined test function against a dict value.
>>> def test_func(val):
... return val == 42
...
>>> Query().f1.test(test_func)
:param func: The function to call, passing the dict as the first
argument
:param args: Additional arguments to pass to the test function
"""
return self._generate_test(
lambda value: func(value, *args),
('test', self._path, func, args)
)
def any(self, cond):
"""
Check if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.any(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 0}]}
>>> Query().f1.any([1, 2, 3])
Matches::
{'f1': [1, 2]}
{'f1': [3, 4, 5]}
:param cond: Either a query that at least one document has to match or
a list of which at least one document has to be contained
in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and any(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and any(e in cond for e in value)
return self._generate_test(
lambda value: _cmp(value),
('any', self._path, freeze(cond))
)
def all(self, cond):
"""
Check if a condition is met by all documents in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.all(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 1}]}
>>> Query().f1.all([1, 2, 3])
Matches::
{'f1': [1, 2, 3, 4, 5]}
:param cond: Either a query that all documents have to match or a list
which has to be contained in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and all(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and all(e in value for e in cond)
return self._generate_test(
lambda value: _cmp(value),
('all', self._path, freeze(cond))
)
def one_of(self, items):
"""
Check if the value is contained in a list or generator.
>>> Query().f1.one_of(['value 1', 'value 2'])
:param items: The list of items to check with
"""
return self._generate_test(
lambda value: value in items,
('one_of', self._path, freeze(items))
)
|
msiemens/tinydb
|
tinydb/queries.py
|
Query.any
|
python
|
def any(self, cond):
if callable(cond):
def _cmp(value):
return is_sequence(value) and any(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and any(e in cond for e in value)
return self._generate_test(
lambda value: _cmp(value),
('any', self._path, freeze(cond))
)
|
Check if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.any(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 0}]}
>>> Query().f1.any([1, 2, 3])
Matches::
{'f1': [1, 2]}
{'f1': [3, 4, 5]}
:param cond: Either a query that at least one document has to match or
a list of which at least one document has to be contained
in the tested document.
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/queries.py#L309-L342
|
[
"def freeze(obj):\n if isinstance(obj, dict):\n return FrozenDict((k, freeze(v)) for k, v in obj.items())\n elif isinstance(obj, list):\n return tuple(freeze(el) for el in obj)\n elif isinstance(obj, set):\n return frozenset(obj)\n else:\n return obj\n",
"def _generate_test(self, test, hashval):\n \"\"\"\n Generate a query based on a test function.\n\n :param test: The test the query executes.\n :param hashval: The hash of the query.\n :return: A :class:`~tinydb.queries.QueryImpl` object\n \"\"\"\n if not self._path:\n raise ValueError('Query has no path')\n\n return QueryImpl(self._prepare_test(test), hashval)\n"
] |
class Query(QueryImpl):
"""
TinyDB Queries.
Allows to build queries for TinyDB databases. There are two main ways of
using queries:
1) ORM-like usage:
>>> User = Query()
>>> db.search(User.name == 'John Doe')
>>> db.search(User['logged-in'] == True)
2) Classical usage:
>>> db.search(where('value') == True)
Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
a more fluent syntax.
Besides the methods documented here you can combine queries using the
binary AND and OR operators:
>>> # Binary AND:
>>> db.search((where('field1').exists()) & (where('field2') == 5))
>>> # Binary OR:
>>> db.search((where('field1').exists()) | (where('field2') == 5))
Queries are executed by calling the resulting object. They expect to get
the document to test as the first argument and return ``True`` or
``False`` depending on whether the documents matches the query or not.
"""
def __init__(self):
self._path = ()
super(Query, self).__init__(
self._prepare_test(lambda _: True),
('path', self._path)
)
def __repr__(self):
return '{}()'.format(type(self).__name__)
def __hash__(self):
return super(Query, self).__hash__()
def __getattr__(self, item):
query = Query()
query._path = self._path + (item, )
query.hashval = ('path', query._path)
return query
__getitem__ = __getattr__
def _prepare_test(self, test):
def runner(value):
try:
# Resolve the path
for part in self._path:
value = value[part]
except (KeyError, TypeError):
return False
else:
return test(value)
return runner
def _generate_test(self, test, hashval):
"""
Generate a query based on a test function.
:param test: The test the query executes.
:param hashval: The hash of the query.
:return: A :class:`~tinydb.queries.QueryImpl` object
"""
if not self._path:
raise ValueError('Query has no path')
return QueryImpl(self._prepare_test(test), hashval)
def __eq__(self, rhs):
"""
Test a dict value for equality.
>>> Query().f1 == 42
:param rhs: The value to compare against
"""
if sys.version_info <= (3, 0): # pragma: no cover
# Special UTF-8 handling on Python 2
def test(value):
with catch_warning(UnicodeWarning):
try:
return value == rhs
except UnicodeWarning:
# Dealing with a case, where 'value' or 'rhs'
# is unicode and the other is a byte string.
if isinstance(value, str):
return value.decode('utf-8') == rhs
elif isinstance(rhs, str):
return value == rhs.decode('utf-8')
else: # pragma: no cover
def test(value):
return value == rhs
return self._generate_test(
lambda value: test(value),
('==', self._path, freeze(rhs))
)
def __ne__(self, rhs):
"""
Test a dict value for inequality.
>>> Query().f1 != 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value != rhs,
('!=', self._path, freeze(rhs))
)
def __lt__(self, rhs):
"""
Test a dict value for being lower than another value.
>>> Query().f1 < 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value < rhs,
('<', self._path, rhs)
)
def __le__(self, rhs):
"""
Test a dict value for being lower than or equal to another value.
>>> where('f1') <= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value <= rhs,
('<=', self._path, rhs)
)
def __gt__(self, rhs):
"""
Test a dict value for being greater than another value.
>>> Query().f1 > 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value > rhs,
('>', self._path, rhs)
)
def __ge__(self, rhs):
"""
Test a dict value for being greater than or equal to another value.
>>> Query().f1 >= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value >= rhs,
('>=', self._path, rhs)
)
def exists(self):
"""
Test for a dict where a provided key exists.
>>> Query().f1.exists()
"""
return self._generate_test(
lambda _: True,
('exists', self._path)
)
def matches(self, regex, flags=0):
"""
Run a regex test against a dict value (whole string has to match).
>>> Query().f1.matches(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.match(regex, value, flags),
('matches', self._path, regex)
)
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
)
def test(self, func, *args):
"""
Run a user-defined test function against a dict value.
>>> def test_func(val):
... return val == 42
...
>>> Query().f1.test(test_func)
:param func: The function to call, passing the dict as the first
argument
:param args: Additional arguments to pass to the test function
"""
return self._generate_test(
lambda value: func(value, *args),
('test', self._path, func, args)
)
def all(self, cond):
"""
Check if a condition is met by all documents in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.all(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 1}]}
>>> Query().f1.all([1, 2, 3])
Matches::
{'f1': [1, 2, 3, 4, 5]}
:param cond: Either a query that all documents have to match or a list
which has to be contained in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and all(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and all(e in value for e in cond)
return self._generate_test(
lambda value: _cmp(value),
('all', self._path, freeze(cond))
)
def one_of(self, items):
"""
Check if the value is contained in a list or generator.
>>> Query().f1.one_of(['value 1', 'value 2'])
:param items: The list of items to check with
"""
return self._generate_test(
lambda value: value in items,
('one_of', self._path, freeze(items))
)
|
msiemens/tinydb
|
tinydb/queries.py
|
Query.one_of
|
python
|
def one_of(self, items):
return self._generate_test(
lambda value: value in items,
('one_of', self._path, freeze(items))
)
|
Check if the value is contained in a list or generator.
>>> Query().f1.one_of(['value 1', 'value 2'])
:param items: The list of items to check with
|
train
|
https://github.com/msiemens/tinydb/blob/10052cb1ae6a3682d26eb4272c44e3b020aa5877/tinydb/queries.py#L377-L388
|
[
"def freeze(obj):\n if isinstance(obj, dict):\n return FrozenDict((k, freeze(v)) for k, v in obj.items())\n elif isinstance(obj, list):\n return tuple(freeze(el) for el in obj)\n elif isinstance(obj, set):\n return frozenset(obj)\n else:\n return obj\n",
"def _generate_test(self, test, hashval):\n \"\"\"\n Generate a query based on a test function.\n\n :param test: The test the query executes.\n :param hashval: The hash of the query.\n :return: A :class:`~tinydb.queries.QueryImpl` object\n \"\"\"\n if not self._path:\n raise ValueError('Query has no path')\n\n return QueryImpl(self._prepare_test(test), hashval)\n"
] |
class Query(QueryImpl):
"""
TinyDB Queries.
Allows to build queries for TinyDB databases. There are two main ways of
using queries:
1) ORM-like usage:
>>> User = Query()
>>> db.search(User.name == 'John Doe')
>>> db.search(User['logged-in'] == True)
2) Classical usage:
>>> db.search(where('value') == True)
Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
a more fluent syntax.
Besides the methods documented here you can combine queries using the
binary AND and OR operators:
>>> # Binary AND:
>>> db.search((where('field1').exists()) & (where('field2') == 5))
>>> # Binary OR:
>>> db.search((where('field1').exists()) | (where('field2') == 5))
Queries are executed by calling the resulting object. They expect to get
the document to test as the first argument and return ``True`` or
``False`` depending on whether the documents matches the query or not.
"""
def __init__(self):
self._path = ()
super(Query, self).__init__(
self._prepare_test(lambda _: True),
('path', self._path)
)
def __repr__(self):
return '{}()'.format(type(self).__name__)
def __hash__(self):
return super(Query, self).__hash__()
def __getattr__(self, item):
query = Query()
query._path = self._path + (item, )
query.hashval = ('path', query._path)
return query
__getitem__ = __getattr__
def _prepare_test(self, test):
def runner(value):
try:
# Resolve the path
for part in self._path:
value = value[part]
except (KeyError, TypeError):
return False
else:
return test(value)
return runner
def _generate_test(self, test, hashval):
"""
Generate a query based on a test function.
:param test: The test the query executes.
:param hashval: The hash of the query.
:return: A :class:`~tinydb.queries.QueryImpl` object
"""
if not self._path:
raise ValueError('Query has no path')
return QueryImpl(self._prepare_test(test), hashval)
def __eq__(self, rhs):
"""
Test a dict value for equality.
>>> Query().f1 == 42
:param rhs: The value to compare against
"""
if sys.version_info <= (3, 0): # pragma: no cover
# Special UTF-8 handling on Python 2
def test(value):
with catch_warning(UnicodeWarning):
try:
return value == rhs
except UnicodeWarning:
# Dealing with a case, where 'value' or 'rhs'
# is unicode and the other is a byte string.
if isinstance(value, str):
return value.decode('utf-8') == rhs
elif isinstance(rhs, str):
return value == rhs.decode('utf-8')
else: # pragma: no cover
def test(value):
return value == rhs
return self._generate_test(
lambda value: test(value),
('==', self._path, freeze(rhs))
)
def __ne__(self, rhs):
"""
Test a dict value for inequality.
>>> Query().f1 != 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value != rhs,
('!=', self._path, freeze(rhs))
)
def __lt__(self, rhs):
"""
Test a dict value for being lower than another value.
>>> Query().f1 < 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value < rhs,
('<', self._path, rhs)
)
def __le__(self, rhs):
"""
Test a dict value for being lower than or equal to another value.
>>> where('f1') <= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value <= rhs,
('<=', self._path, rhs)
)
def __gt__(self, rhs):
"""
Test a dict value for being greater than another value.
>>> Query().f1 > 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value > rhs,
('>', self._path, rhs)
)
def __ge__(self, rhs):
"""
Test a dict value for being greater than or equal to another value.
>>> Query().f1 >= 42
:param rhs: The value to compare against
"""
return self._generate_test(
lambda value: value >= rhs,
('>=', self._path, rhs)
)
def exists(self):
"""
Test for a dict where a provided key exists.
>>> Query().f1.exists()
"""
return self._generate_test(
lambda _: True,
('exists', self._path)
)
def matches(self, regex, flags=0):
"""
Run a regex test against a dict value (whole string has to match).
>>> Query().f1.matches(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.match(regex, value, flags),
('matches', self._path, regex)
)
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only substring string has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
)
def test(self, func, *args):
"""
Run a user-defined test function against a dict value.
>>> def test_func(val):
... return val == 42
...
>>> Query().f1.test(test_func)
:param func: The function to call, passing the dict as the first
argument
:param args: Additional arguments to pass to the test function
"""
return self._generate_test(
lambda value: func(value, *args),
('test', self._path, func, args)
)
def any(self, cond):
"""
Check if a condition is met by any document in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.any(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 0}]}
>>> Query().f1.any([1, 2, 3])
Matches::
{'f1': [1, 2]}
{'f1': [3, 4, 5]}
:param cond: Either a query that at least one document has to match or
a list of which at least one document has to be contained
in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and any(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and any(e in cond for e in value)
return self._generate_test(
lambda value: _cmp(value),
('any', self._path, freeze(cond))
)
def all(self, cond):
"""
Check if a condition is met by all documents in a list,
where a condition can also be a sequence (e.g. list).
>>> Query().f1.all(Query().f2 == 1)
Matches::
{'f1': [{'f2': 1}, {'f2': 1}]}
>>> Query().f1.all([1, 2, 3])
Matches::
{'f1': [1, 2, 3, 4, 5]}
:param cond: Either a query that all documents have to match or a list
which has to be contained in the tested document.
"""
if callable(cond):
def _cmp(value):
return is_sequence(value) and all(cond(e) for e in value)
else:
def _cmp(value):
return is_sequence(value) and all(e in value for e in cond)
return self._generate_test(
lambda value: _cmp(value),
('all', self._path, freeze(cond))
)
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
set_default_proxy
|
python
|
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
socksocket.default_proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
|
set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed. All parameters are as for socket.set_proxy().
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L146-L155
| null |
"""
SocksiPy - Python SOCKS module.
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
===============================================================================
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
Modifications made by Anorov (https://github.com/Anorov)
-Forked and renamed to PySocks
-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
-Re-styled code to make it readable
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
-Improved exception handling and output
-Removed irritating use of sequence indexes, replaced with tuple unpacked variables
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
-Other general fixes
-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
-Various small bug fixes
"""
__version__ = "1.6.7"
import socket
import struct
from errno import EOPNOTSUPP, EINVAL, EAGAIN
from io import BytesIO
from os import SEEK_CUR
import os
import sys
import functools
import logging
from collections import Callable
from base64 import b64encode
if os.name == "nt" and sys.version_info < (3, 0):
try:
from . import win_inet_pton
except ImportError:
raise ImportError("To run PySocks on Windows you must install win_inet_pton")
log = logging.getLogger(__name__)
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3
PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))
_orgsocket = _orig_socket = socket.socket
def set_self_blocking(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
self = args[0]
try:
_is_blocking = self.gettimeout()
if _is_blocking == 0:
self.setblocking(True)
return function(*args, **kwargs)
except Exception as e:
raise
finally:
# set orgin blcoking
if _is_blocking == 0:
self.setblocking(False)
return wrapper
class ProxyError(IOError):
"""
socket_err contains original socket.error exception.
"""
def __init__(self, msg, socket_err=None):
self.msg = msg
self.socket_err = socket_err
if socket_err:
self.msg += ": {0}".format(socket_err)
def __str__(self):
return self.msg
class GeneralProxyError(ProxyError): pass
class ProxyConnectionError(ProxyError): pass
class SOCKS5AuthError(ProxyError): pass
class SOCKS5Error(ProxyError): pass
class SOCKS4Error(ProxyError): pass
class HTTPError(ProxyError): pass
SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
0x5D: "Request rejected because the client program and identd report different user-ids"
}
SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
0x02: "Connection not allowed by ruleset",
0x03: "Network unreachable",
0x04: "Host unreachable",
0x05: "Connection refused",
0x06: "TTL expired",
0x07: "Command not supported, or protocol error",
0x08: "Address type not supported"
}
DEFAULT_PORTS = { SOCKS4: 1080,
SOCKS5: 1080,
HTTP: 8080
}
def setdefaultproxy(*args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return set_default_proxy(*args, **kwargs)
def get_default_proxy():
"""
Returns the default proxy, set by set_default_proxy.
"""
return socksocket.default_proxy
getdefaultproxy = get_default_proxy
def wrap_module(module):
"""
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using set_default_proxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if socksocket.default_proxy:
module.socket.socket = socksocket
else:
raise GeneralProxyError("No default proxy specified")
wrapmodule = wrap_module
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
proxy_port=None, proxy_rdns=True,
proxy_username=None, proxy_password=None,
timeout=None, source_address=None,
socket_options=None):
"""create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
Like socket.create_connection(), but connects to proxy
before returning the socket object.
dest_pair - 2-tuple of (IP/hostname, port).
**proxy_args - Same args passed to socksocket.set_proxy() if present.
timeout - Optional socket timeout value, in seconds.
source_address - tuple (host, port) for the socket to bind to as its source
address before connecting (only for compatibility)
"""
# Remove IPv6 brackets on the remote address and proxy address.
remote_host, remote_port = dest_pair
if remote_host.startswith('['):
remote_host = remote_host.strip('[]')
if proxy_addr and proxy_addr.startswith('['):
proxy_addr = proxy_addr.strip('[]')
err = None
# Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
family, socket_type, proto, canonname, sa = r
sock = None
try:
sock = socksocket(family, socket_type, proto)
if socket_options:
for opt in socket_options:
sock.setsockopt(*opt)
if isinstance(timeout, (int, float)):
sock.settimeout(timeout)
if proxy_type:
sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
proxy_username, proxy_password)
if source_address:
sock.bind(source_address)
sock.connect((remote_host, remote_port))
return sock
except (socket.error, ProxyConnectionError) as e:
err = e
if sock:
sock.close()
sock = None
if err:
raise err
raise socket.error("gai returned empty list.")
class _BaseSocket(socket.socket):
"""Allows Python 2's "delegated" methods such as send() to be overridden
"""
def __init__(self, *pos, **kw):
_orig_socket.__init__(self, *pos, **kw)
self._savedmethods = dict()
for name in self._savenames:
self._savedmethods[name] = getattr(self, name)
delattr(self, name) # Allows normal overriding mechanism to work
_savenames = list()
def _makemethod(name):
return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
for name in ("sendto", "send", "recvfrom", "recv"):
method = getattr(_BaseSocket, name, None)
# Determine if the method is not defined the usual way
# as a function in the class.
# Python 2 uses __slots__, so there are descriptors for each method,
# but they are not functions.
if not isinstance(method, Callable):
_BaseSocket._savenames.append(name)
setattr(_BaseSocket, name, _makemethod(name))
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
    def sendto(self, bytes, *args, **kwargs):
        """Send a datagram through the SOCKS5 UDP relay.

        Stream sockets pass straight through.  For UDP, each datagram is
        prefixed with the SOCKS5 UDP request header (RSV + FRAG + DST
        address) before being sent to the relay.  Returns the number of
        payload bytes sent (header excluded).
        """
        if self.type != socket.SOCK_DGRAM:
            return super(socksocket, self).sendto(bytes, *args, **kwargs)
        if not self._proxyconn:
            # Lazily establish the UDP association on first use.
            self.bind(("", 0))

        address = args[-1]
        flags = args[:-1]

        header = BytesIO()
        RSV = b"\x00\x00"
        header.write(RSV)
        STANDALONE = b"\x00"  # FRAG = 0: no fragmentation
        header.write(STANDALONE)
        self._write_SOCKS5_address(address, header)

        sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
        # Report only the payload bytes, not the SOCKS header we added.
        return sent - header.tell()
    def send(self, bytes, flags=0, **kwargs):
        """Send data; UDP sockets route through sendto() so the SOCKS5
        relay header is applied (destination = proxy_peername)."""
        if self.type == socket.SOCK_DGRAM:
            return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
        else:
            return super(socksocket, self).send(bytes, flags, **kwargs)
    def recvfrom(self, bufsize, flags=0):
        """Receive a datagram via the relay, stripping the SOCKS5 header.

        Returns (data, (fromhost, fromport)).  Datagrams from peers other
        than the connected one are rejected with EAGAIN.  Fragmented
        SOCKS5 UDP packets are not supported.
        """
        if self.type != socket.SOCK_DGRAM:
            return super(socksocket, self).recvfrom(bufsize, flags)
        if not self._proxyconn:
            self.bind(("", 0))

        # Over-read so the SOCKS header never truncates the payload.
        buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
        buf.seek(2, SEEK_CUR)  # skip RSV
        frag = buf.read(1)
        if ord(frag):
            raise NotImplementedError("Received UDP packet fragment")
        fromhost, fromport = self._read_SOCKS5_address(buf)

        if self.proxy_peername:
            peerhost, peerport = self.proxy_peername
            # peerport 0 means "accept from any port" (wildcard connect)
            if fromhost != peerhost or peerport not in (0, fromport):
                raise socket.error(EAGAIN, "Packet filtered")

        return (buf.read(bufsize), (fromhost, fromport))
    def recv(self, *pos, **kw):
        """Receive data, discarding the sender address (see recvfrom)."""
        bytes, _ = self.recvfrom(*pos, **kw)
        return bytes

    def close(self):
        """Close the socket, plus the TCP control connection that keeps
        a SOCKS5 UDP association alive, if one exists."""
        if self._proxyconn:
            self._proxyconn.close()
        return super(socksocket, self).close()
    def get_proxy_sockname(self):
        """
        Returns the bound IP address and port number at the proxy.
        """
        return self.proxy_sockname

    getproxysockname = get_proxy_sockname  # legacy SocksiPy alias

    def get_proxy_peername(self):
        """
        Returns the IP and port number of the proxy.
        """
        return super(socksocket, self).getpeername()

    getproxypeername = get_proxy_peername  # legacy SocksiPy alias

    def get_peername(self):
        """
        Returns the IP address and port number of the destination
        machine (note: get_proxy_peername returns the proxy)
        """
        return self.proxy_peername

    getpeername = get_peername  # shadows socket.getpeername for callers
    def _negotiate_SOCKS5(self, *dest_addr):
        """
        Negotiates a stream connection through a SOCKS5 server.

        Stores the resolved destination in proxy_peername and the
        proxy-side bound address in proxy_sockname.
        """
        CONNECT = b"\x01"  # SOCKS5 CMD field: CONNECT
        self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
            CONNECT, dest_addr)
    def _SOCKS5_request(self, conn, cmd, dst):
        """
        Send SOCKS5 request with given command (CMD field) and
        address (DST field). Returns resolved DST address that was used.

        Performs the full RFC 1928 handshake on *conn*: method
        negotiation, optional username/password sub-negotiation
        (RFC 1929), then the request itself.  Returns a tuple of
        (resolved destination, proxy-side bound address).
        """
        proxy_type, addr, port, rdns, username, password = self.proxy
        writer = conn.makefile("wb")
        reader = conn.makefile("rb", 0)  # buffering=0 renamed in Python 3
        try:
            # First we'll send the authentication packages we support.
            if username and password:
                # The username/password details were supplied to the
                # set_proxy method so we support the USERNAME/PASSWORD
                # authentication (in addition to the standard none).
                writer.write(b"\x05\x02\x00\x02")
            else:
                # No username/password were entered, therefore we
                # only support connections with no authentication.
                writer.write(b"\x05\x01\x00")

            # We'll receive the server's response to determine which
            # method was selected
            writer.flush()
            chosen_auth = self._readall(reader, 2)

            if chosen_auth[0:1] != b"\x05":
                # Note: string[i:i+1] is used because indexing of a bytestring
                # via bytestring[i] yields an integer in Python 3
                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            # Check the chosen authentication method
            if chosen_auth[1:2] == b"\x02":
                # Okay, we need to perform a basic username/password
                # authentication.
                writer.write(b"\x01" + chr(len(username)).encode()
                             + username
                             + chr(len(password)).encode()
                             + password)
                writer.flush()
                auth_status = self._readall(reader, 2)
                if auth_status[0:1] != b"\x01":
                    # Bad response
                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
                if auth_status[1:2] != b"\x00":
                    # Authentication failed
                    raise SOCKS5AuthError("SOCKS5 authentication failed")
                # Otherwise, authentication succeeded

            # No authentication is required if 0x00
            elif chosen_auth[1:2] != b"\x00":
                # Reaching here is always bad
                if chosen_auth[1:2] == b"\xFF":
                    raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
                else:
                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            # Now we can request the actual connection
            writer.write(b"\x05" + cmd + b"\x00")
            resolved = self._write_SOCKS5_address(dst, writer)
            writer.flush()

            # Get the response
            resp = self._readall(reader, 3)
            if resp[0:1] != b"\x05":
                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            status = ord(resp[1:2])
            if status != 0x00:
                # Connection failed: server returned an error
                error = SOCKS5_ERRORS.get(status, "Unknown error")
                raise SOCKS5Error("{0:#04x}: {1}".format(status, error))

            # Get the bound address/port
            bnd = self._read_SOCKS5_address(reader)

            super(socksocket, self).settimeout(self._timeout)
            return (resolved, bnd)
        finally:
            reader.close()
            writer.close()
    def _write_SOCKS5_address(self, addr, file):
        """
        Return the host and port packed for the SOCKS5 protocol,
        and the resolved address as a tuple object.

        Writes ATYP + ADDR + PORT to *file*.  IP literals are encoded as
        IPv4/IPv6 directly; hostnames are either sent verbatim for
        remote resolution (rdns=True) or resolved locally first.
        """
        host, port = addr
        proxy_type, _, _, rdns, username, password = self.proxy
        family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}

        # If the given destination address is an IP address, we'll
        # use the IP address request even if remote resolving was specified.
        # Detect whether the address is IPv4/6 directly.
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                addr_bytes = socket.inet_pton(family, host)
                file.write(family_to_byte[family] + addr_bytes)
                # Normalize the textual form (e.g. collapsed IPv6).
                host = socket.inet_ntop(family, addr_bytes)
                file.write(struct.pack(">H", port))
                return host, port
            except socket.error:
                continue

        # Well it's not an IP number, so it's probably a DNS name.
        if rdns:
            # Resolve remotely
            host_bytes = host.encode('idna')
            file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
        else:
            # Resolve locally
            addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
            # We can't really work out what IP is reachable, so just pick the
            # first.
            target_addr = addresses[0]
            family = target_addr[0]
            host = target_addr[4][0]

            addr_bytes = socket.inet_pton(family, host)
            file.write(family_to_byte[family] + addr_bytes)
            host = socket.inet_ntop(family, addr_bytes)
        file.write(struct.pack(">H", port))
        return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
    def _negotiate_SOCKS4(self, dest_addr, dest_port):
        """
        Negotiates a connection through a SOCKS4 server.

        Supports the SOCKS4A extension (remote DNS resolution) when rdns
        is enabled and the destination is a hostname.  On success,
        proxy_sockname/proxy_peername are populated from the reply.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy
        writer = self.makefile("wb")
        reader = self.makefile("rb", 0)  # buffering=0 renamed in Python 3
        try:
            # Check if the destination address provided is an IP address
            remote_resolve = False
            try:
                addr_bytes = socket.inet_aton(dest_addr)
            except socket.error:
                # It's a DNS name. Check where it should be resolved.
                if rdns:
                    # SOCKS4A marker: the deliberately-invalid IP 0.0.0.1
                    # tells the server the hostname follows the userid.
                    addr_bytes = b"\x00\x00\x00\x01"
                    remote_resolve = True
                else:
                    addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))

            # Construct the request packet: VN=4, CD=1 (CONNECT), port, IP
            writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
            writer.write(addr_bytes)

            # The username parameter is considered userid for SOCKS4
            if username:
                writer.write(username)
            writer.write(b"\x00")

            # DNS name if remote resolving is required
            # NOTE: This is actually an extension to the SOCKS4 protocol
            # called SOCKS4A and may not be supported in all cases.
            if remote_resolve:
                writer.write(dest_addr.encode('idna') + b"\x00")
            writer.flush()

            # Get the response from the server
            resp = self._readall(reader, 8)
            if resp[0:1] != b"\x00":
                # Bad data
                raise GeneralProxyError("SOCKS4 proxy server sent invalid data")

            status = ord(resp[1:2])
            if status != 0x5A:
                # Connection failed: server returned an error
                error = SOCKS4_ERRORS.get(status, "Unknown error")
                raise SOCKS4Error("{0:#04x}: {1}".format(status, error))

            # Get the bound address/port
            self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
            if remote_resolve:
                self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
            else:
                self.proxy_peername = dest_addr, dest_port
        finally:
            reader.close()
            writer.close()
    def _negotiate_HTTP(self, dest_addr, dest_port):
        """
        Negotiates a connection through an HTTP server.
        NOTE: This currently only supports HTTP CONNECT-style proxies.

        Sends a CONNECT request (with optional Basic proxy auth) and
        treats any non-200 status as failure.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        # If we need to resolve locally, we do this now
        addr = dest_addr if rdns else socket.gethostbyname(dest_addr)

        http_headers = [
            b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
            b"Host: " + dest_addr.encode('idna')
        ]

        if username and password:
            # username/password are stored as bytes (encoded in set_proxy)
            http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))

        http_headers.append(b"\r\n")

        self.sendall(b"\r\n".join(http_headers))

        # We just need the first line to check if the connection was successful
        fobj = self.makefile()
        status_line = fobj.readline()
        fobj.close()

        if not status_line:
            raise GeneralProxyError("Connection closed unexpectedly")

        try:
            proto, status_code, status_msg = status_line.split(" ", 2)
        except ValueError:
            raise GeneralProxyError("HTTP proxy server sent invalid response")

        if not proto.startswith("HTTP/"):
            raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")

        try:
            status_code = int(status_code)
        except ValueError:
            raise HTTPError("HTTP proxy server did not return a valid HTTP status")

        if status_code != 200:
            error = "{0}: {1}".format(status_code, status_msg)
            if status_code in (400, 403, 405):
                # It's likely that the HTTP proxy server does not support the CONNECT tunneling method
                error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
                          " (must be a CONNECT tunnel proxy)")
            raise HTTPError(error)

        self.proxy_sockname = (b"0.0.0.0", 0)
        self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
    @set_self_blocking
    def connect(self, dest_pair):
        """
        Connects to the specified destination through a proxy.
        Uses the same API as socket's connect().
        To select the proxy server, use set_proxy().

        dest_pair - 2-tuple of (IP/hostname, port).

        Raises ProxyConnectionError if the proxy itself is unreachable,
        and a protocol-specific ProxyError subclass when negotiation
        with the proxy fails.
        """
        if len(dest_pair) != 2 or dest_pair[0].startswith("["):
            # Probably IPv6, not supported -- raise an error, and hope
            # Happy Eyeballs (RFC6555) makes sure at least the IPv4
            # connection works...
            raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))

        dest_addr, dest_port = dest_pair

        if self.type == socket.SOCK_DGRAM:
            # UDP: just record the peer; the relay is set up by bind().
            if not self._proxyconn:
                self.bind(("", 0))
            dest_addr = socket.gethostbyname(dest_addr)

            # If the host address is INADDR_ANY or similar, reset the peer
            # address so that packets are received from any peer
            if dest_addr == "0.0.0.0" and not dest_port:
                self.proxy_peername = None
            else:
                self.proxy_peername = (dest_addr, dest_port)
            return

        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy

        # Do a minimal input check first
        if (not isinstance(dest_pair, (list, tuple))
                or len(dest_pair) != 2
                or not dest_addr
                or not isinstance(dest_port, int)):
            raise GeneralProxyError("Invalid destination-connection (host, port) pair")

        # We set the timeout here so that we don't hang in connection or during
        # negotiation.
        super(socksocket, self).settimeout(self._timeout)

        if proxy_type is None:
            # Treat like regular socket object
            self.proxy_peername = dest_pair
            super(socksocket, self).settimeout(self._timeout)
            super(socksocket, self).connect((dest_addr, dest_port))
            return

        proxy_addr = self._proxy_addr()

        try:
            # Initial connection to proxy server.
            super(socksocket, self).connect(proxy_addr)

        except socket.error as error:
            # Error while connecting to proxy
            self.close()
            proxy_addr, proxy_port = proxy_addr
            proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
            printable_type = PRINTABLE_PROXY_TYPES[proxy_type]

            msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                             proxy_server)
            log.debug("%s due to: %s", msg, error)
            raise ProxyConnectionError(msg, error)

        else:
            # Connected to proxy server, now negotiate
            try:
                # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
                negotiate = self._proxy_negotiators[proxy_type]
                negotiate(self, dest_addr, dest_port)
            except socket.error as error:
                # Wrap socket errors
                self.close()
                raise GeneralProxyError("Socket error", error)
            except ProxyError:
                # Protocol error while negotiating with proxy
                self.close()
                raise
    def _proxy_addr(self):
        """
        Return proxy address to connect to as tuple object

        Falls back to the scheme's default port (1080 for SOCKS,
        8080 for HTTP) when no explicit port was configured.
        """
        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
        proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
        if not proxy_port:
            raise GeneralProxyError("Invalid proxy type")
        return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
create_connection
|
python
|
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
proxy_port=None, proxy_rdns=True,
proxy_username=None, proxy_password=None,
timeout=None, source_address=None,
socket_options=None):
# Remove IPv6 brackets on the remote address and proxy address.
remote_host, remote_port = dest_pair
if remote_host.startswith('['):
remote_host = remote_host.strip('[]')
if proxy_addr and proxy_addr.startswith('['):
proxy_addr = proxy_addr.strip('[]')
err = None
# Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
family, socket_type, proto, canonname, sa = r
sock = None
try:
sock = socksocket(family, socket_type, proto)
if socket_options:
for opt in socket_options:
sock.setsockopt(*opt)
if isinstance(timeout, (int, float)):
sock.settimeout(timeout)
if proxy_type:
sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
proxy_username, proxy_password)
if source_address:
sock.bind(source_address)
sock.connect((remote_host, remote_port))
return sock
except (socket.error, ProxyConnectionError) as e:
err = e
if sock:
sock.close()
sock = None
if err:
raise err
raise socket.error("gai returned empty list.")
|
create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
Like socket.create_connection(), but connects to proxy
before returning the socket object.
dest_pair - 2-tuple of (IP/hostname, port).
**proxy_args - Same args passed to socksocket.set_proxy() if present.
timeout - Optional socket timeout value, in seconds.
source_address - tuple (host, port) for the socket to bind to as its source
address before connecting (only for compatibility)
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L184-L241
|
[
"def settimeout(self, timeout):\n self._timeout = timeout\n try:\n # test if we're connected, if so apply timeout\n peer = self.get_proxy_peername()\n super(socksocket, self).settimeout(self._timeout)\n except socket.error:\n pass\n",
"def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):\n \"\"\"set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])\n Sets the proxy to be used.\n\n proxy_type - The type of the proxy to be used. Three types\n are supported: PROXY_TYPE_SOCKS4 (including socks4a),\n PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP\n addr - The address of the server (IP or DNS).\n port - The port of the server. Defaults to 1080 for SOCKS\n servers and 8080 for HTTP proxy servers.\n rdns - Should DNS queries be performed on the remote side\n (rather than the local side). The default is True.\n Note: This has no effect with SOCKS4 servers.\n username - Username to authenticate with to the server.\n The default is no authentication.\n password - Password to authenticate with to the server.\n Only relevant when username is also provided.\n \"\"\"\n self.proxy = (proxy_type, addr, port, rdns,\n username.encode() if username else None,\n password.encode() if password else None)\n",
"def bind(self, *pos, **kw):\n \"\"\"\n Implements proxy connection for UDP sockets,\n which happens during the bind() phase.\n \"\"\"\n proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy\n if not proxy_type or self.type != socket.SOCK_DGRAM:\n return _orig_socket.bind(self, *pos, **kw)\n\n if self._proxyconn:\n raise socket.error(EINVAL, \"Socket already bound to an address\")\n if proxy_type != SOCKS5:\n msg = \"UDP only supported by SOCKS5 proxy type\"\n raise socket.error(EOPNOTSUPP, msg)\n super(socksocket, self).bind(*pos, **kw)\n\n # Need to specify actual local port because\n # some relays drop packets if a port of zero is specified.\n # Avoid specifying host address in case of NAT though.\n _, port = self.getsockname()\n dst = (\"0\", port)\n\n self._proxyconn = _orig_socket()\n proxy = self._proxy_addr()\n self._proxyconn.connect(proxy)\n\n UDP_ASSOCIATE = b\"\\x03\"\n _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)\n\n # The relay is most likely on the same host as the SOCKS proxy,\n # but some proxies return a private IP address (10.x.y.z)\n host, _ = proxy\n _, port = relay\n super(socksocket, self).connect((host, port))\n super(socksocket, self).settimeout(self._timeout)\n self.proxy_sockname = (\"0.0.0.0\", 0) # Unknown\n",
"def close(self):\n if self._proxyconn:\n self._proxyconn.close()\n return super(socksocket, self).close()\n"
] |
"""
SocksiPy - Python SOCKS module.
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
===============================================================================
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
Modifications made by Anorov (https://github.com/Anorov)
-Forked and renamed to PySocks
-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
-Re-styled code to make it readable
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
-Improved exception handling and output
-Removed irritating use of sequence indexes, replaced with tuple unpacked variables
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
-Other general fixes
-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
-Various small bug fixes
"""
__version__ = "1.6.7"
import socket
import struct
from errno import EOPNOTSUPP, EINVAL, EAGAIN
from io import BytesIO
from os import SEEK_CUR
import os
import sys
import functools
import logging
# Callable moved to collections.abc in Python 3.3 and was removed from
# the collections namespace in Python 3.10; try the new home first so
# the module keeps importing on modern interpreters.
try:
    from collections.abc import Callable
except ImportError:
    from collections import Callable
from base64 import b64encode

if os.name == "nt" and sys.version_info < (3, 0):
    try:
        from . import win_inet_pton
    except ImportError:
        raise ImportError("To run PySocks on Windows you must install win_inet_pton")
log = logging.getLogger(__name__)

# Proxy type constants.  The short aliases (SOCKS4/SOCKS5/HTTP) are the
# preferred spelling; the PROXY_TYPE_* names exist for SocksiPy
# compatibility.
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3

PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
# Reverse mapping (constant -> name) used in error messages.
PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))

# Keep a reference to the real socket class before any wrap_module()
# monkey-patching replaces it.
_orgsocket = _orig_socket = socket.socket
def set_self_blocking(function):
    """Decorator for socksocket methods that must run on a blocking socket.

    If the socket (args[0]) is currently non-blocking (timeout == 0), it is
    temporarily switched to blocking mode for the duration of the call and
    restored afterwards, even if the wrapped function raises.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        self = args[0]
        # Read the mode BEFORE entering try: the original assigned it inside
        # the try block, so a raising gettimeout() made the finally clause
        # fail with UnboundLocalError instead of the real error.
        _is_blocking = self.gettimeout()
        try:
            if _is_blocking == 0:
                self.setblocking(True)
            return function(*args, **kwargs)
        finally:
            # Restore the original non-blocking mode.
            if _is_blocking == 0:
                self.setblocking(False)
    return wrapper
class ProxyError(IOError):
    """Root of the PySocks exception hierarchy.

    socket_err keeps the original socket.error when one triggered this
    failure; its text is folded into the message.
    """

    def __init__(self, msg, socket_err=None):
        self.socket_err = socket_err
        if socket_err:
            msg = "{0}: {1}".format(msg, socket_err)
        self.msg = msg

    def __str__(self):
        return self.msg
# Concrete error categories; all inherit ProxyError so callers can catch
# the whole family with a single except clause.
class GeneralProxyError(ProxyError): pass        # malformed replies, wrapped socket errors
class ProxyConnectionError(ProxyError): pass     # could not reach the proxy server itself
class SOCKS5AuthError(ProxyError): pass          # SOCKS5 authentication rejected or failed
class SOCKS5Error(ProxyError): pass              # SOCKS5 server returned an error reply
class SOCKS4Error(ProxyError): pass              # SOCKS4 server returned an error reply
class HTTPError(ProxyError): pass                # HTTP CONNECT request failed
# Human-readable messages for SOCKS4 reply codes (byte 2 of the reply).
SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
                  0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
                  0x5D: "Request rejected because the client program and identd report different user-ids"
                }

# Human-readable messages for SOCKS5 REP codes (RFC 1928, section 6).
SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
                  0x02: "Connection not allowed by ruleset",
                  0x03: "Network unreachable",
                  0x04: "Host unreachable",
                  0x05: "Connection refused",
                  0x06: "TTL expired",
                  0x07: "Command not supported, or protocol error",
                  0x08: "Address type not supported"
                }

# Port assumed when set_proxy()/set_default_proxy() is given no port.
DEFAULT_PORTS = { SOCKS4: 1080,
                  SOCKS5: 1080,
                  HTTP: 8080
                }
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
    """
    set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed. All parameters are as for socket.set_proxy().
    """
    # Stored pre-encoded, matching the per-instance proxy tuple layout.
    socksocket.default_proxy = (proxy_type, addr, port, rdns,
                                username.encode() if username else None,
                                password.encode() if password else None)

def setdefaultproxy(*args, **kwargs):
    # Deprecated SocksiPy name; translates its legacy 'proxytype' keyword.
    if 'proxytype' in kwargs:
        kwargs['proxy_type'] = kwargs.pop('proxytype')
    return set_default_proxy(*args, **kwargs)

def get_default_proxy():
    """
    Returns the default proxy, set by set_default_proxy.
    """
    return socksocket.default_proxy

getdefaultproxy = get_default_proxy  # legacy SocksiPy alias
def wrap_module(module):
    """
    Attempts to replace a module's socket library with a SOCKS socket. Must set
    a default proxy using set_default_proxy(...) first.
    This will only work on modules that import socket directly into the namespace;
    most of the Python Standard Library falls into this category.
    """
    if socksocket.default_proxy:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError("No default proxy specified")

wrapmodule = wrap_module  # legacy SocksiPy alias
class _BaseSocket(socket.socket):
    """Allows Python 2's "delegated" methods such as send() to be overridden
    """
    def __init__(self, *pos, **kw):
        _orig_socket.__init__(self, *pos, **kw)

        # Stash the per-instance delegate methods, then remove the
        # instance attributes so class-level overrides take effect.
        self._savedmethods = dict()
        for name in self._savenames:
            self._savedmethods[name] = getattr(self, name)
            delattr(self, name)  # Allows normal overriding mechanism to work

    _savenames = list()  # populated by the loop below (Python 2 only)


def _makemethod(name):
    # `name` is bound as a parameter, so each generated method closes
    # over its own value (avoids the late-binding closure pitfall).
    return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
for name in ("sendto", "send", "recvfrom", "recv"):
    method = getattr(_BaseSocket, name, None)

    # Determine if the method is not defined the usual way
    # as a function in the class.
    # Python 2 uses __slots__, so there are descriptors for each method,
    # but they are not functions.
    if not isinstance(method, Callable):
        _BaseSocket._savenames.append(name)
        setattr(_BaseSocket, name, _makemethod(name))
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
        """Create the socket; only SOCK_STREAM and SOCK_DGRAM are accepted.

        Starts with the class-wide default_proxy (if set via
        set_default_proxy), otherwise with no proxy configured.
        """
        if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
            msg = "Socket type must be stream or datagram, not {!r}"
            raise ValueError(msg.format(type))

        super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
        self._proxyconn = None  # TCP connection to keep UDP relay alive

        # proxy tuple layout: (type, addr, port, rdns, username, password)
        if self.default_proxy:
            self.proxy = self.default_proxy
        else:
            self.proxy = (None, None, None, None, None, None)
        self.proxy_sockname = None
        self.proxy_peername = None

        self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
    def settimeout(self, timeout):
        """Record the timeout; it is applied to the underlying socket
        only once a proxy connection exists (otherwise connect() applies
        it later)."""
        self._timeout = timeout
        try:
            # test if we're connected, if so apply timeout
            peer = self.get_proxy_peername()
            super(socksocket, self).settimeout(self._timeout)
        except socket.error:
            pass

    def gettimeout(self):
        """Return the stored timeout (may not yet be applied)."""
        return self._timeout

    def setblocking(self, v):
        """Map blocking mode onto the timeout machinery above."""
        if v:
            self.settimeout(None)
        else:
            self.settimeout(0.0)
    def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
        """set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.

        proxy_type - The type of the proxy to be used. Three types
                     are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                     PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr -       The address of the server (IP or DNS).
        port -       The port of the server. Defaults to 1080 for SOCKS
                     servers and 8080 for HTTP proxy servers.
        rdns -       Should DNS queries be performed on the remote side
                     (rather than the local side). The default is True.
                     Note: This has no effect with SOCKS4 servers.
        username -   Username to authenticate with to the server.
                     The default is no authentication.
        password -   Password to authenticate with to the server.
                     Only relevant when username is also provided.
        """
        # Credentials are stored as bytes, ready for the wire protocols.
        self.proxy = (proxy_type, addr, port, rdns,
                      username.encode() if username else None,
                      password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._readall
|
python
|
def _readall(self, file, count):
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
|
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L297-L308
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket.set_proxy
|
python
|
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
|
set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L328-L348
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket.bind
|
python
|
def bind(self, *pos, **kw):
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0)
|
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L355-L390
|
[
"def _SOCKS5_request(self, conn, cmd, dst):\n \"\"\"\n Send SOCKS5 request with given command (CMD field) and\n address (DST field). Returns resolved DST address that was used.\n \"\"\"\n proxy_type, addr, port, rdns, username, password = self.proxy\n\n writer = conn.makefile(\"wb\")\n reader = conn.makefile(\"rb\", 0) # buffering=0 renamed in Python 3\n try:\n # First we'll send the authentication packages we support.\n if username and password:\n # The username/password details were supplied to the\n # set_proxy method so we support the USERNAME/PASSWORD\n # authentication (in addition to the standard none).\n writer.write(b\"\\x05\\x02\\x00\\x02\")\n else:\n # No username/password were entered, therefore we\n # only support connections with no authentication.\n writer.write(b\"\\x05\\x01\\x00\")\n\n # We'll receive the server's response to determine which\n # method was selected\n writer.flush()\n chosen_auth = self._readall(reader, 2)\n\n if chosen_auth[0:1] != b\"\\x05\":\n # Note: string[i:i+1] is used because indexing of a bytestring\n # via bytestring[i] yields an integer in Python 3\n raise GeneralProxyError(\"SOCKS5 proxy server sent invalid data\")\n\n # Check the chosen authentication method\n\n if chosen_auth[1:2] == b\"\\x02\":\n # Okay, we need to perform a basic username/password\n # authentication.\n writer.write(b\"\\x01\" + chr(len(username)).encode()\n + username\n + chr(len(password)).encode()\n + password)\n writer.flush()\n auth_status = self._readall(reader, 2)\n if auth_status[0:1] != b\"\\x01\":\n # Bad response\n raise GeneralProxyError(\"SOCKS5 proxy server sent invalid data\")\n if auth_status[1:2] != b\"\\x00\":\n # Authentication failed\n raise SOCKS5AuthError(\"SOCKS5 authentication failed\")\n\n # Otherwise, authentication succeeded\n\n # No authentication is required if 0x00\n elif chosen_auth[1:2] != b\"\\x00\":\n # Reaching here is always bad\n if chosen_auth[1:2] == b\"\\xFF\":\n raise SOCKS5AuthError(\"All offered SOCKS5 
authentication methods were rejected\")\n else:\n raise GeneralProxyError(\"SOCKS5 proxy server sent invalid data\")\n\n # Now we can request the actual connection\n writer.write(b\"\\x05\" + cmd + b\"\\x00\")\n resolved = self._write_SOCKS5_address(dst, writer)\n writer.flush()\n\n # Get the response\n resp = self._readall(reader, 3)\n if resp[0:1] != b\"\\x05\":\n raise GeneralProxyError(\"SOCKS5 proxy server sent invalid data\")\n\n status = ord(resp[1:2])\n if status != 0x00:\n # Connection failed: server returned an error\n error = SOCKS5_ERRORS.get(status, \"Unknown error\")\n raise SOCKS5Error(\"{0:#04x}: {1}\".format(status, error))\n\n # Get the bound address/port\n bnd = self._read_SOCKS5_address(reader)\n\n super(socksocket, self).settimeout(self._timeout)\n return (resolved, bnd)\n finally:\n reader.close()\n writer.close()\n",
"def _proxy_addr(self):\n \"\"\"\n Return proxy address to connect to as tuple object\n \"\"\"\n proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy\n proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)\n if not proxy_port:\n raise GeneralProxyError(\"Invalid proxy type\")\n return proxy_addr, proxy_port\n"
] |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
# Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._negotiate_SOCKS5
|
python
|
def _negotiate_SOCKS5(self, *dest_addr):
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
|
Negotiates a stream connection through a SOCKS5 server.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L471-L477
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._SOCKS5_request
|
python
|
def _SOCKS5_request(self, conn, cmd, dst):
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
|
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L479-L561
|
[
"def _readall(self, file, count):\n \"\"\"\n Receive EXACTLY the number of bytes requested from the file object.\n Blocks until the required number of bytes have been received.\n \"\"\"\n data = b\"\"\n while len(data) < count:\n d = file.read(count - len(data))\n if not d:\n raise GeneralProxyError(\"Connection closed unexpectedly\")\n data += d\n return data\n",
"def _write_SOCKS5_address(self, addr, file):\n \"\"\"\n Return the host and port packed for the SOCKS5 protocol,\n and the resolved address as a tuple object.\n \"\"\"\n host, port = addr\n proxy_type, _, _, rdns, username, password = self.proxy\n family_to_byte = {socket.AF_INET: b\"\\x01\", socket.AF_INET6: b\"\\x04\"}\n\n # If the given destination address is an IP address, we'll\n # use the IP address request even if remote resolving was specified.\n # Detect whether the address is IPv4/6 directly.\n for family in (socket.AF_INET, socket.AF_INET6):\n try:\n addr_bytes = socket.inet_pton(family, host)\n file.write(family_to_byte[family] + addr_bytes)\n host = socket.inet_ntop(family, addr_bytes)\n file.write(struct.pack(\">H\", port))\n return host, port\n except socket.error:\n continue\n\n # Well it's not an IP number, so it's probably a DNS name.\n if rdns:\n # Resolve remotely\n host_bytes = host.encode('idna')\n file.write(b\"\\x03\" + chr(len(host_bytes)).encode() + host_bytes)\n else:\n # Resolve locally\n addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)\n # We can't really work out what IP is reachable, so just pick the\n # first.\n target_addr = addresses[0]\n family = target_addr[0]\n host = target_addr[4][0]\n\n addr_bytes = socket.inet_pton(family, host)\n file.write(family_to_byte[family] + addr_bytes)\n host = socket.inet_ntop(family, addr_bytes)\n file.write(struct.pack(\">H\", port))\n return host, port\n",
"def _read_SOCKS5_address(self, file):\n atyp = self._readall(file, 1)\n if atyp == b\"\\x01\":\n addr = socket.inet_ntoa(self._readall(file, 4))\n elif atyp == b\"\\x03\":\n length = self._readall(file, 1)\n addr = self._readall(file, ord(length))\n elif atyp == b\"\\x04\":\n addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))\n else:\n raise GeneralProxyError(\"SOCKS5 proxy server sent invalid data\")\n\n port = struct.unpack(\">H\", self._readall(file, 2))[0]\n return addr, port\n"
] |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.

:param dest_addr: destination host (IP literal or DNS name).
:param dest_port: destination TCP port.
:raises GeneralProxyError: on a truncated or malformed response.
:raises HTTPError: if the proxy answers with a non-200 status.

On success sets ``self.proxy_sockname`` (unknown for HTTP tunnels,
reported as ``(b"0.0.0.0", 0)``) and ``self.proxy_peername``.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
# username/password are already bytes here (encoded by set_proxy),
# so the b":" concatenation below is consistent.
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
# Expected shape: "HTTP/1.x <code> <message>".
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
# Dispatch table mapping proxy-type constants to the matching
# negotiation routine; connect() looks up and invokes the entry after
# the raw TCP connection to the proxy server is established.
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).

:raises socket.error: for IPv6-looking destinations (unsupported)
and for plain socket failures when no proxy is configured.
:raises GeneralProxyError: for an invalid dest_pair.
:raises ProxyConnectionError: if the proxy itself is unreachable.
:raises ProxyError subclasses: if negotiation with the proxy fails.
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
# UDP: the relay is set up lazily in bind(); connect() only
# records the default peer used by send()/recvfrom() filtering.
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
    """Return the (host, port) pair of the configured proxy server.

    Falls back to the default port for the proxy type when no
    explicit port was configured.
    """
    kind, host = self.proxy[0], self.proxy[1]
    port = self.proxy[2] or DEFAULT_PORTS.get(kind)
    if not port:
        raise GeneralProxyError("Invalid proxy type")
    return host, port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._write_SOCKS5_address
|
python
|
def _write_SOCKS5_address(self, addr, file):
    """Serialize *addr* for the SOCKS5 wire protocol into *file*.

    Writes the ATYP byte, the address and the big-endian port, and
    returns the (host, port) pair actually sent (IP literals are
    normalized through inet_ntop).
    """
    host, port = addr
    rdns = self.proxy[3]
    atyp_of = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
    # An IP literal is always sent as-is, even when remote resolving
    # was requested.
    for fam in (socket.AF_INET, socket.AF_INET6):
        try:
            packed = socket.inet_pton(fam, host)
            file.write(atyp_of[fam] + packed)
            host = socket.inet_ntop(fam, packed)
            file.write(struct.pack(">H", port))
            return host, port
        except socket.error:
            continue
    # Not an IP literal, so treat it as a DNS name.
    if rdns:
        # Ship the hostname for the proxy to resolve (ATYP 0x03:
        # one length byte followed by the name).
        encoded = host.encode('idna')
        file.write(b"\x03" + chr(len(encoded)).encode() + encoded)
    else:
        # Resolve locally; there is no way to know which address is
        # reachable, so the first result is used.
        info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                  socket.SOCK_STREAM, socket.IPPROTO_TCP,
                                  socket.AI_ADDRCONFIG)[0]
        fam = info[0]
        host = info[4][0]
        packed = socket.inet_pton(fam, host)
        file.write(atyp_of[fam] + packed)
        host = socket.inet_ntop(fam, packed)
    file.write(struct.pack(">H", port))
    return host, port
|
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L563-L603
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
    """Read exactly *count* bytes from *file*.

    Blocks until all bytes have arrived; raises GeneralProxyError if
    the stream ends before *count* bytes were received.
    """
    chunks = []
    remaining = count
    while remaining > 0:
        piece = file.read(remaining)
        if not piece:
            raise GeneralProxyError("Connection closed unexpectedly")
        chunks.append(piece)
        remaining -= len(piece)
    return b"".join(chunks)
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
    """Configure the proxy this socket should tunnel through.

    proxy_type -- PROXY_TYPE_SOCKS4 (including socks4a),
        PROXY_TYPE_SOCKS5 or PROXY_TYPE_HTTP.
    addr -- address of the proxy server (IP or DNS name).
    port -- port of the proxy server; when omitted, 1080 is assumed
        for SOCKS and 8080 for HTTP proxies.
    rdns -- perform DNS resolution on the remote side rather than
        locally (default True; no effect with SOCKS4 servers).
    username -- optional username for authentication.
    password -- optional password; only relevant with a username.

    Credentials are stored as bytes in ``self.proxy``.
    """
    creds = tuple(v.encode() if v else None for v in (username, password))
    self.proxy = (proxy_type, addr, port, rdns) + creds
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.

:param conn: socket to negotiate over (the proxy connection; may be
the relay-control connection for UDP ASSOCIATE).
:param cmd: one-byte CMD field, e.g. b"\\x01" CONNECT, b"\\x03" UDP.
:param dst: (host, port) destination pair.
:returns: tuple of (resolved DST address, server-reported BND address).
:raises SOCKS5AuthError: if authentication is rejected or fails.
:raises SOCKS5Error: if the server refuses the request.
:raises GeneralProxyError: on any malformed server reply.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
# VER=5, CMD, RSV=0, then the serialized DST address.
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _read_SOCKS5_address(self, file):
    """Parse a SOCKS5 address block (ATYP, ADDR, PORT) from *file*.

    Returns (host, port). For ATYP 0x03 the host is returned as the
    raw domain-name bytes exactly as sent by the server.
    """
    kind = self._readall(file, 1)
    if kind == b"\x01":
        # IPv4: four raw bytes.
        host = socket.inet_ntoa(self._readall(file, 4))
    elif kind == b"\x03":
        # Domain name: one length byte followed by the name itself.
        size = ord(self._readall(file, 1))
        host = self._readall(file, size)
    elif kind == b"\x04":
        # IPv6: sixteen raw bytes.
        host = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
    else:
        raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
    port = struct.unpack(">H", self._readall(file, 2))[0]
    return host, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._negotiate_SOCKS4
|
python
|
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiate a connection through a SOCKS4 server.

:param dest_addr: destination host -- an IPv4 literal or a DNS name.
:param dest_port: destination TCP port.
:raises GeneralProxyError: if the server's reply is malformed.
:raises SOCKS4Error: if the server refuses the CONNECT request.

On success sets ``self.proxy_sockname`` and ``self.proxy_peername``.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
# SOCKS4A marker address 0.0.0.1: hostname follows later.
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
|
Negotiates a connection through a SOCKS4 server.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L620-L677
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
# Dispatch table mapping the proxy-type constant to the corresponding
# negotiation routine; consulted by connect() once the TCP connection
# to the proxy server itself has been established.
_proxy_negotiators = {
    SOCKS4: _negotiate_SOCKS4,
    SOCKS5: _negotiate_SOCKS5,
    HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
    """
    Connects to the specified destination through a proxy.
    Uses the same API as socket's connect().
    To select the proxy server, use set_proxy().

    dest_pair - 2-tuple of (IP/hostname, port).

    Raises GeneralProxyError for a malformed destination pair or a socket
    error during negotiation, and ProxyConnectionError when the proxy
    server itself cannot be reached.
    """
    if len(dest_pair) != 2 or dest_pair[0].startswith("["):
        # Probably IPv6, not supported -- raise an error, and hope
        # Happy Eyeballs (RFC6555) makes sure at least the IPv4
        # connection works...
        raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
    dest_addr, dest_port = dest_pair
    if self.type == socket.SOCK_DGRAM:
        # UDP sockets only record the peer here; the relay itself is
        # established (lazily) by bind().
        if not self._proxyconn:
            self.bind(("", 0))
        dest_addr = socket.gethostbyname(dest_addr)
        # If the host address is INADDR_ANY or similar, reset the peer
        # address so that packets are received from any peer
        if dest_addr == "0.0.0.0" and not dest_port:
            self.proxy_peername = None
        else:
            self.proxy_peername = (dest_addr, dest_port)
        return
    proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
    # Do a minimal input check first
    if (not isinstance(dest_pair, (list, tuple))
            or len(dest_pair) != 2
            or not dest_addr
            or not isinstance(dest_port, int)):
        raise GeneralProxyError("Invalid destination-connection (host, port) pair")
    # We set the timeout here so that we don't hang in connection or during
    # negotiation.
    super(socksocket, self).settimeout(self._timeout)
    if proxy_type is None:
        # Treat like regular socket object
        self.proxy_peername = dest_pair
        super(socksocket, self).settimeout(self._timeout)
        super(socksocket, self).connect((dest_addr, dest_port))
        return
    proxy_addr = self._proxy_addr()
    try:
        # Initial connection to proxy server.
        super(socksocket, self).connect(proxy_addr)
    except socket.error as error:
        # Error while connecting to proxy
        self.close()
        proxy_addr, proxy_port = proxy_addr
        proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
        printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
        msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                         proxy_server)
        log.debug("%s due to: %s", msg, error)
        raise ProxyConnectionError(msg, error)
    else:
        # Connected to proxy server, now negotiate
        try:
            # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
            negotiate = self._proxy_negotiators[proxy_type]
            negotiate(self, dest_addr, dest_port)
        except socket.error as error:
            # Wrap socket errors
            self.close()
            raise GeneralProxyError("Socket error", error)
        except ProxyError:
            # Protocol error while negotiating with proxy
            self.close()
            raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._negotiate_HTTP
|
python
|
def _negotiate_HTTP(self, dest_addr, dest_port):
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
|
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L679-L731
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket.connect
|
python
|
def connect(self, dest_pair):
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
|
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L740-L821
|
[
"def bind(self, *pos, **kw):\n \"\"\"\n Implements proxy connection for UDP sockets,\n which happens during the bind() phase.\n \"\"\"\n proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy\n if not proxy_type or self.type != socket.SOCK_DGRAM:\n return _orig_socket.bind(self, *pos, **kw)\n\n if self._proxyconn:\n raise socket.error(EINVAL, \"Socket already bound to an address\")\n if proxy_type != SOCKS5:\n msg = \"UDP only supported by SOCKS5 proxy type\"\n raise socket.error(EOPNOTSUPP, msg)\n super(socksocket, self).bind(*pos, **kw)\n\n # Need to specify actual local port because\n # some relays drop packets if a port of zero is specified.\n # Avoid specifying host address in case of NAT though.\n _, port = self.getsockname()\n dst = (\"0\", port)\n\n self._proxyconn = _orig_socket()\n proxy = self._proxy_addr()\n self._proxyconn.connect(proxy)\n\n UDP_ASSOCIATE = b\"\\x03\"\n _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)\n\n # The relay is most likely on the same host as the SOCKS proxy,\n # but some proxies return a private IP address (10.x.y.z)\n host, _ = proxy\n _, port = relay\n super(socksocket, self).connect((host, port))\n super(socksocket, self).settimeout(self._timeout)\n self.proxy_sockname = (\"0.0.0.0\", 0) # Unknown\n"
] |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def _proxy_addr(self):
"""
Return proxy address to connect to as tuple object
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
JinnLynn/genpac
|
genpac/pysocks/socks.py
|
socksocket._proxy_addr
|
python
|
def _proxy_addr(self):
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
if not proxy_port:
raise GeneralProxyError("Invalid proxy type")
return proxy_addr, proxy_port
|
Return proxy address to connect to as tuple object
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/pysocks/socks.py#L823-L831
| null |
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
self._timeout = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def settimeout(self, timeout):
self._timeout = timeout
try:
# test if we're connected, if so apply timeout
peer = self.get_proxy_peername()
super(socksocket, self).settimeout(self._timeout)
except socket.error:
pass
def gettimeout(self):
return self._timeout
def setblocking(self, v):
if v:
self.settimeout(None)
else:
self.settimeout(0.0)
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr, port, rdns,
username.encode() if username else None,
password.encode() if password else None)
def setproxy(self, *args, **kwargs):
if 'proxytype' in kwargs:
kwargs['proxy_type'] = kwargs.pop('proxytype')
return self.set_proxy(*args, **kwargs)
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
super(socksocket, self).bind(*pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
super(socksocket, self).connect((host, port))
super(socksocket, self).settimeout(self._timeout)
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args, **kwargs):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).sendto(bytes, *args, **kwargs)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, **kwargs)
return sent - header.tell()
def send(self, bytes, flags=0, **kwargs):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
else:
return super(socksocket, self).send(bytes, flags, **kwargs)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return super(socksocket, self).recvfrom(bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
buf.seek(2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
if self.proxy_peername:
peerhost, peerport = self.proxy_peername
if fromhost != peerhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(bufsize), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return super(socksocket, self).close()
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return super(socksocket, self).getpeername()
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
super(socksocket, self).settimeout(self._timeout)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
# If the given destination address is an IP address, we'll
# use the IP address request even if remote resolving was specified.
# Detect whether the address is IPv4/6 directly.
for family in (socket.AF_INET, socket.AF_INET6):
try:
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
except socket.error:
continue
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
host_bytes = host.encode('idna')
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
else:
# Resolve locally
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG)
# We can't really work out what IP is reachable, so just pick the
# first.
target_addr = addresses[0]
family = target_addr[0]
host = target_addr[4][0]
addr_bytes = socket.inet_pton(family, host)
file.write(family_to_byte[family] + addr_bytes)
host = socket.inet_ntop(family, addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
atyp = self._readall(file, 1)
if atyp == b"\x01":
addr = socket.inet_ntoa(self._readall(file, 4))
elif atyp == b"\x03":
length = self._readall(file, 1)
addr = self._readall(file, ord(length))
elif atyp == b"\x04":
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
port = struct.unpack(">H", self._readall(file, 2))[0]
return addr, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
"""
Negotiates a connection through an HTTP server.
NOTE: This currently only supports HTTP CONNECT-style proxies.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
# If we need to resolve locally, we do this now
addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
http_headers = [
b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + b" HTTP/1.1",
b"Host: " + dest_addr.encode('idna')
]
if username and password:
http_headers.append(b"Proxy-Authorization: basic " + b64encode(username + b":" + password))
http_headers.append(b"\r\n")
self.sendall(b"\r\n".join(http_headers))
# We just need the first line to check if the connection was successful
fobj = self.makefile()
status_line = fobj.readline()
fobj.close()
if not status_line:
raise GeneralProxyError("Connection closed unexpectedly")
try:
proto, status_code, status_msg = status_line.split(" ", 2)
except ValueError:
raise GeneralProxyError("HTTP proxy server sent invalid response")
if not proto.startswith("HTTP/"):
raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
try:
status_code = int(status_code)
except ValueError:
raise HTTPError("HTTP proxy server did not return a valid HTTP status")
if status_code != 200:
error = "{0}: {1}".format(status_code, status_msg)
if status_code in (400, 403, 405):
# It's likely that the HTTP proxy server does not support the CONNECT tunneling method
error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
" (must be a CONNECT tunnel proxy)")
raise HTTPError(error)
self.proxy_sockname = (b"0.0.0.0", 0)
self.proxy_peername = addr, dest_port
_proxy_negotiators = {
SOCKS4: _negotiate_SOCKS4,
SOCKS5: _negotiate_SOCKS5,
HTTP: _negotiate_HTTP
}
@set_self_blocking
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise
|
JinnLynn/genpac
|
genpac/publicsuffixlist/__init__.py
|
PublicSuffixList._parse
|
python
|
def _parse(self, source, accept_encoded_idn, only_icann=False):
publicsuffix = set()
maxlabel = 0
section_is_icann = None
if isinstance(source, decodablestr):
source = source.splitlines()
ln = 0
for line in source:
ln += 1
if only_icann:
ul = u(line).rstrip()
if ul == "// ===BEGIN ICANN DOMAINS===":
section_is_icann = True
continue
elif ul == "// ===END ICANN DOMAINS===":
section_is_icann = False
continue
if not section_is_icann:
continue
s = u(line).lower().split(" ")[0].rstrip()
if s == "" or s.startswith("//"):
continue
maxlabel = max(maxlabel, s.count(".") + 1)
publicsuffix.add(s)
if accept_encoded_idn:
e = encode_idn(s.lstrip("!"))
if s[0] == "!":
publicsuffix.add("!" + e)
else:
publicsuffix.add(e)
self._publicsuffix = frozenset(publicsuffix)
self._maxlabel = maxlabel
|
PSL parser core
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/publicsuffixlist/__init__.py#L81-L119
|
[
"def u(s):\n return s if isinstance(s, str) else s.decode(ENCODING)\n",
"def encode_idn(domain):\n return u(domain).encode(\"idna\").decode(\"ascii\")\n"
] |
class PublicSuffixList(object):
""" PublicSuffixList parser.
After __init__(), all instance methods become thread-safe.
Most methods accept str or unicode as input in Python 2.x, str (not bytes) in Python 3.x.
"""
def __init__(self, source=None, accept_unknown=True, accept_encoded_idn=True,
only_icann=False):
""" Parse PSL source file and Return PSL object
source: file (line iterable) object, or flat str to parse. (Default: built-in PSL file)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: True)
accept_encoded_idn: bool, if False, do not generate punycoded version of PSL.
Without punycoded PSL object, parseing punycoded IDN cause incorrect results. (Default: True)
only_icann: bool, if True, only ICANN suffixes are honored, not private ones.
The markers '// ===BEGIN ICANN DOMAINS===' and '// ===END ICANN DOMAINS==='
are needed for ICANN section detection. (Default: False)
"""
self.accept_unknown = accept_unknown
if source is None:
try:
source = open(PSLFILE, "rb")
self._parse(source, accept_encoded_idn, only_icann=only_icann)
finally:
if source:
source.close()
else:
self._parse(source, accept_encoded_idn, only_icann=only_icann)
def suffix(self, domain, accept_unknown=None):
""" Alias for privatesuffix """
return self.privatesuffix(domain, accept_unknown)
def privatesuffix(self, domain, accept_unknown=None):
""" Return shortest suffix assigned for an individual.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain has no private part.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
if ll <= 1:
# is TLD
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if ("!" + s) in self._publicsuffix:
# exact private match
return s
if i > 0 and ("*." + s) in self._publicsuffix:
if i <= 1:
# domain is publicsuffix
return None
else:
return ".".join(labels[i-2:])
if s in self._publicsuffix:
if i > 0:
return ".".join(labels[i-1:])
else:
# domain is publicsuffix
return None
else:
# no match found
if self.accept_unknown and ll >= 2:
return ".".join(labels[-2:])
else:
return None
def publicsuffix(self, domain, accept_unknown=None):
""" Return longest publically shared suffix.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain is not listed in PSL and accept_unknown is False.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
# shortcut for tld
if ll == 1:
if accept_unknown:
return domain
else:
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return s
if ("!" + s) in self._publicsuffix:
# exact exclude
if i + 1 < ll:
return ".".join(labels[i+1:])
else:
return None
if i > 0 and ("*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if s in self._publicsuffix:
return s
else:
# no match found
if accept_unknown:
return labels[-1]
else:
return None
def is_private(self, domain):
""" Return True if domain is private suffix or sub-domain. """
return self.suffix(domain) is not None
def is_public(self, domain):
""" Return True if domain is publix suffix. """
return self.publicsuffix(domain) == domain
def privateparts(self, domain):
""" Return tuple of labels and the private suffix. """
s = self.privatesuffix(domain)
if s is None:
return None
else:
# I know the domain is valid and ends with private suffix
pre = domain[0:-(len(s)+1)]
if pre == "":
return (s,)
else:
return tuple(pre.split(".") + [s])
def subdomain(self, domain, depth):
""" Return so-called subdomain of specified depth in the private suffix. """
p = self.privateparts(domain)
if p is None or depth > len(p) - 1:
return None
else:
return ".".join(p[-(depth+1):])
|
JinnLynn/genpac
|
genpac/publicsuffixlist/__init__.py
|
PublicSuffixList.privatesuffix
|
python
|
def privatesuffix(self, domain, accept_unknown=None):
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
if ll <= 1:
# is TLD
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if ("!" + s) in self._publicsuffix:
# exact private match
return s
if i > 0 and ("*." + s) in self._publicsuffix:
if i <= 1:
# domain is publicsuffix
return None
else:
return ".".join(labels[i-2:])
if s in self._publicsuffix:
if i > 0:
return ".".join(labels[i-1:])
else:
# domain is publicsuffix
return None
else:
# no match found
if self.accept_unknown and ll >= 2:
return ".".join(labels[-2:])
else:
return None
|
Return shortest suffix assigned for an individual.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain has no private part.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/publicsuffixlist/__init__.py#L125-L182
| null |
class PublicSuffixList(object):
""" PublicSuffixList parser.
After __init__(), all instance methods become thread-safe.
Most methods accept str or unicode as input in Python 2.x, str (not bytes) in Python 3.x.
"""
def __init__(self, source=None, accept_unknown=True, accept_encoded_idn=True,
only_icann=False):
""" Parse PSL source file and Return PSL object
source: file (line iterable) object, or flat str to parse. (Default: built-in PSL file)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: True)
accept_encoded_idn: bool, if False, do not generate punycoded version of PSL.
Without punycoded PSL object, parseing punycoded IDN cause incorrect results. (Default: True)
only_icann: bool, if True, only ICANN suffixes are honored, not private ones.
The markers '// ===BEGIN ICANN DOMAINS===' and '// ===END ICANN DOMAINS==='
are needed for ICANN section detection. (Default: False)
"""
self.accept_unknown = accept_unknown
if source is None:
try:
source = open(PSLFILE, "rb")
self._parse(source, accept_encoded_idn, only_icann=only_icann)
finally:
if source:
source.close()
else:
self._parse(source, accept_encoded_idn, only_icann=only_icann)
def _parse(self, source, accept_encoded_idn, only_icann=False):
""" PSL parser core """
publicsuffix = set()
maxlabel = 0
section_is_icann = None
if isinstance(source, decodablestr):
source = source.splitlines()
ln = 0
for line in source:
ln += 1
if only_icann:
ul = u(line).rstrip()
if ul == "// ===BEGIN ICANN DOMAINS===":
section_is_icann = True
continue
elif ul == "// ===END ICANN DOMAINS===":
section_is_icann = False
continue
if not section_is_icann:
continue
s = u(line).lower().split(" ")[0].rstrip()
if s == "" or s.startswith("//"):
continue
maxlabel = max(maxlabel, s.count(".") + 1)
publicsuffix.add(s)
if accept_encoded_idn:
e = encode_idn(s.lstrip("!"))
if s[0] == "!":
publicsuffix.add("!" + e)
else:
publicsuffix.add(e)
self._publicsuffix = frozenset(publicsuffix)
self._maxlabel = maxlabel
def suffix(self, domain, accept_unknown=None):
""" Alias for privatesuffix """
return self.privatesuffix(domain, accept_unknown)
def publicsuffix(self, domain, accept_unknown=None):
""" Return longest publically shared suffix.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain is not listed in PSL and accept_unknown is False.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
# shortcut for tld
if ll == 1:
if accept_unknown:
return domain
else:
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return s
if ("!" + s) in self._publicsuffix:
# exact exclude
if i + 1 < ll:
return ".".join(labels[i+1:])
else:
return None
if i > 0 and ("*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if s in self._publicsuffix:
return s
else:
# no match found
if accept_unknown:
return labels[-1]
else:
return None
def is_private(self, domain):
""" Return True if domain is private suffix or sub-domain. """
return self.suffix(domain) is not None
def is_public(self, domain):
""" Return True if domain is publix suffix. """
return self.publicsuffix(domain) == domain
def privateparts(self, domain):
""" Return tuple of labels and the private suffix. """
s = self.privatesuffix(domain)
if s is None:
return None
else:
# I know the domain is valid and ends with private suffix
pre = domain[0:-(len(s)+1)]
if pre == "":
return (s,)
else:
return tuple(pre.split(".") + [s])
def subdomain(self, domain, depth):
""" Return so-called subdomain of specified depth in the private suffix. """
p = self.privateparts(domain)
if p is None or depth > len(p) - 1:
return None
else:
return ".".join(p[-(depth+1):])
|
JinnLynn/genpac
|
genpac/publicsuffixlist/__init__.py
|
PublicSuffixList.privateparts
|
python
|
def privateparts(self, domain):
s = self.privatesuffix(domain)
if s is None:
return None
else:
# I know the domain is valid and ends with private suffix
pre = domain[0:-(len(s)+1)]
if pre == "":
return (s,)
else:
return tuple(pre.split(".") + [s])
|
Return tuple of labels and the private suffix.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/publicsuffixlist/__init__.py#L249-L260
|
[
"def privatesuffix(self, domain, accept_unknown=None):\n \"\"\" Return shortest suffix assigned for an individual.\n\n domain: str or unicode to parse. (Required)\n accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)\n\n Return None if domain has invalid format.\n Return None if domain has no private part.\n \"\"\"\n\n if accept_unknown is None:\n accept_unknown = self.accept_unknown\n\n if not isinstance(domain, basestr):\n raise TypeError()\n\n labels = domain.lower().rsplit(\".\", self._maxlabel + 2)\n ll = len(labels)\n\n if \"\\0\" in domain or \"\" in labels:\n # not a valid domain\n return None\n\n if ll <= 1:\n # is TLD\n return None\n\n # skip labels longer than rules\n for i in range(max(0, ll - self._maxlabel), ll):\n s = \".\".join(labels[i:])\n\n if i > 0 and (\"!*.\" + s) in self._publicsuffix:\n return \".\".join(labels[i-1:])\n\n if (\"!\" + s) in self._publicsuffix:\n # exact private match\n return s\n\n if i > 0 and (\"*.\" + s) in self._publicsuffix:\n if i <= 1:\n # domain is publicsuffix\n return None\n else:\n return \".\".join(labels[i-2:])\n\n if s in self._publicsuffix:\n if i > 0:\n return \".\".join(labels[i-1:])\n else:\n # domain is publicsuffix\n return None\n"
] |
class PublicSuffixList(object):
""" PublicSuffixList parser.
After __init__(), all instance methods become thread-safe.
Most methods accept str or unicode as input in Python 2.x, str (not bytes) in Python 3.x.
"""
def __init__(self, source=None, accept_unknown=True, accept_encoded_idn=True,
only_icann=False):
""" Parse PSL source file and Return PSL object
source: file (line iterable) object, or flat str to parse. (Default: built-in PSL file)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: True)
accept_encoded_idn: bool, if False, do not generate punycoded version of PSL.
Without punycoded PSL object, parseing punycoded IDN cause incorrect results. (Default: True)
only_icann: bool, if True, only ICANN suffixes are honored, not private ones.
The markers '// ===BEGIN ICANN DOMAINS===' and '// ===END ICANN DOMAINS==='
are needed for ICANN section detection. (Default: False)
"""
self.accept_unknown = accept_unknown
if source is None:
try:
source = open(PSLFILE, "rb")
self._parse(source, accept_encoded_idn, only_icann=only_icann)
finally:
if source:
source.close()
else:
self._parse(source, accept_encoded_idn, only_icann=only_icann)
def _parse(self, source, accept_encoded_idn, only_icann=False):
""" PSL parser core """
publicsuffix = set()
maxlabel = 0
section_is_icann = None
if isinstance(source, decodablestr):
source = source.splitlines()
ln = 0
for line in source:
ln += 1
if only_icann:
ul = u(line).rstrip()
if ul == "// ===BEGIN ICANN DOMAINS===":
section_is_icann = True
continue
elif ul == "// ===END ICANN DOMAINS===":
section_is_icann = False
continue
if not section_is_icann:
continue
s = u(line).lower().split(" ")[0].rstrip()
if s == "" or s.startswith("//"):
continue
maxlabel = max(maxlabel, s.count(".") + 1)
publicsuffix.add(s)
if accept_encoded_idn:
e = encode_idn(s.lstrip("!"))
if s[0] == "!":
publicsuffix.add("!" + e)
else:
publicsuffix.add(e)
self._publicsuffix = frozenset(publicsuffix)
self._maxlabel = maxlabel
def suffix(self, domain, accept_unknown=None):
""" Alias for privatesuffix """
return self.privatesuffix(domain, accept_unknown)
def privatesuffix(self, domain, accept_unknown=None):
""" Return shortest suffix assigned for an individual.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain has no private part.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
if ll <= 1:
# is TLD
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if ("!" + s) in self._publicsuffix:
# exact private match
return s
if i > 0 and ("*." + s) in self._publicsuffix:
if i <= 1:
# domain is publicsuffix
return None
else:
return ".".join(labels[i-2:])
if s in self._publicsuffix:
if i > 0:
return ".".join(labels[i-1:])
else:
# domain is publicsuffix
return None
else:
# no match found
if self.accept_unknown and ll >= 2:
return ".".join(labels[-2:])
else:
return None
def publicsuffix(self, domain, accept_unknown=None):
""" Return longest publically shared suffix.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain is not listed in PSL and accept_unknown is False.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
# shortcut for tld
if ll == 1:
if accept_unknown:
return domain
else:
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return s
if ("!" + s) in self._publicsuffix:
# exact exclude
if i + 1 < ll:
return ".".join(labels[i+1:])
else:
return None
if i > 0 and ("*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if s in self._publicsuffix:
return s
else:
# no match found
if accept_unknown:
return labels[-1]
else:
return None
def is_private(self, domain):
""" Return True if domain is private suffix or sub-domain. """
return self.suffix(domain) is not None
def is_public(self, domain):
""" Return True if domain is publix suffix. """
return self.publicsuffix(domain) == domain
def subdomain(self, domain, depth):
""" Return so-called subdomain of specified depth in the private suffix. """
p = self.privateparts(domain)
if p is None or depth > len(p) - 1:
return None
else:
return ".".join(p[-(depth+1):])
|
JinnLynn/genpac
|
genpac/publicsuffixlist/__init__.py
|
PublicSuffixList.subdomain
|
python
|
def subdomain(self, domain, depth):
p = self.privateparts(domain)
if p is None or depth > len(p) - 1:
return None
else:
return ".".join(p[-(depth+1):])
|
Return so-called subdomain of specified depth in the private suffix.
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/publicsuffixlist/__init__.py#L262-L268
|
[
"def privateparts(self, domain):\n \"\"\" Return tuple of labels and the private suffix. \"\"\"\n s = self.privatesuffix(domain)\n if s is None:\n return None\n else:\n # I know the domain is valid and ends with private suffix\n pre = domain[0:-(len(s)+1)]\n if pre == \"\":\n return (s,)\n else:\n return tuple(pre.split(\".\") + [s])\n"
] |
class PublicSuffixList(object):
""" PublicSuffixList parser.
After __init__(), all instance methods become thread-safe.
Most methods accept str or unicode as input in Python 2.x, str (not bytes) in Python 3.x.
"""
def __init__(self, source=None, accept_unknown=True, accept_encoded_idn=True,
only_icann=False):
""" Parse PSL source file and Return PSL object
source: file (line iterable) object, or flat str to parse. (Default: built-in PSL file)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: True)
accept_encoded_idn: bool, if False, do not generate punycoded version of PSL.
Without punycoded PSL object, parseing punycoded IDN cause incorrect results. (Default: True)
only_icann: bool, if True, only ICANN suffixes are honored, not private ones.
The markers '// ===BEGIN ICANN DOMAINS===' and '// ===END ICANN DOMAINS==='
are needed for ICANN section detection. (Default: False)
"""
self.accept_unknown = accept_unknown
if source is None:
try:
source = open(PSLFILE, "rb")
self._parse(source, accept_encoded_idn, only_icann=only_icann)
finally:
if source:
source.close()
else:
self._parse(source, accept_encoded_idn, only_icann=only_icann)
def _parse(self, source, accept_encoded_idn, only_icann=False):
""" PSL parser core """
publicsuffix = set()
maxlabel = 0
section_is_icann = None
if isinstance(source, decodablestr):
source = source.splitlines()
ln = 0
for line in source:
ln += 1
if only_icann:
ul = u(line).rstrip()
if ul == "// ===BEGIN ICANN DOMAINS===":
section_is_icann = True
continue
elif ul == "// ===END ICANN DOMAINS===":
section_is_icann = False
continue
if not section_is_icann:
continue
s = u(line).lower().split(" ")[0].rstrip()
if s == "" or s.startswith("//"):
continue
maxlabel = max(maxlabel, s.count(".") + 1)
publicsuffix.add(s)
if accept_encoded_idn:
e = encode_idn(s.lstrip("!"))
if s[0] == "!":
publicsuffix.add("!" + e)
else:
publicsuffix.add(e)
self._publicsuffix = frozenset(publicsuffix)
self._maxlabel = maxlabel
def suffix(self, domain, accept_unknown=None):
""" Alias for privatesuffix """
return self.privatesuffix(domain, accept_unknown)
def privatesuffix(self, domain, accept_unknown=None):
""" Return shortest suffix assigned for an individual.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain has no private part.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
if ll <= 1:
# is TLD
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if ("!" + s) in self._publicsuffix:
# exact private match
return s
if i > 0 and ("*." + s) in self._publicsuffix:
if i <= 1:
# domain is publicsuffix
return None
else:
return ".".join(labels[i-2:])
if s in self._publicsuffix:
if i > 0:
return ".".join(labels[i-1:])
else:
# domain is publicsuffix
return None
else:
# no match found
if self.accept_unknown and ll >= 2:
return ".".join(labels[-2:])
else:
return None
def publicsuffix(self, domain, accept_unknown=None):
""" Return longest publically shared suffix.
domain: str or unicode to parse. (Required)
accept_unknown: bool, assume unknown TLDs to be public suffix. (Default: object default)
Return None if domain has invalid format.
Return None if domain is not listed in PSL and accept_unknown is False.
"""
if accept_unknown is None:
accept_unknown = self.accept_unknown
if not isinstance(domain, basestr):
raise TypeError()
labels = domain.lower().rsplit(".", self._maxlabel + 2)
ll = len(labels)
if "\0" in domain or "" in labels:
# not a valid domain
return None
# shortcut for tld
if ll == 1:
if accept_unknown:
return domain
else:
return None
# skip labels longer than rules
for i in range(max(0, ll - self._maxlabel), ll):
s = ".".join(labels[i:])
if i > 0 and ("!*." + s) in self._publicsuffix:
return s
if ("!" + s) in self._publicsuffix:
# exact exclude
if i + 1 < ll:
return ".".join(labels[i+1:])
else:
return None
if i > 0 and ("*." + s) in self._publicsuffix:
return ".".join(labels[i-1:])
if s in self._publicsuffix:
return s
else:
# no match found
if accept_unknown:
return labels[-1]
else:
return None
def is_private(self, domain):
""" Return True if domain is private suffix or sub-domain. """
return self.suffix(domain) is not None
def is_public(self, domain):
""" Return True if domain is publix suffix. """
return self.publicsuffix(domain) == domain
def privateparts(self, domain):
""" Return tuple of labels and the private suffix. """
s = self.privatesuffix(domain)
if s is None:
return None
else:
# I know the domain is valid and ends with private suffix
pre = domain[0:-(len(s)+1)]
if pre == "":
return (s,)
else:
return tuple(pre.split(".") + [s])
|
JinnLynn/genpac
|
genpac/publicsuffixlist/update.py
|
updatePSL
|
python
|
def updatePSL(psl_file=PSLFILE):
if requests is None:
raise Exception("Please install python-requests http(s) library. $ sudo pip install requests")
r = requests.get(PSLURL)
if r.status_code != requests.codes.ok or len(r.content) == 0:
raise Exception("Could not download PSL from " + PSLURL)
lastmod = r.headers.get("last-modified", None)
f = open(psl_file + ".swp", "wb")
f.write(r.content)
f.close()
with open(psl_file + ".swp", "rb") as f:
psl = PublicSuffixList(f)
os.rename(psl_file + ".swp", psl_file)
if lastmod:
t = time.mktime(parsedate(lastmod))
os.utime(psl_file, (t, t))
print("PSL updated")
if lastmod:
print("last-modified: " + lastmod)
|
Updates a local copy of PSL file
:param psl_file: path for the file to store the list. Default: PSLFILE
|
train
|
https://github.com/JinnLynn/genpac/blob/2f466d28f403a9a5624e02edcd538475fe475fc8/genpac/publicsuffixlist/update.py#L22-L50
| null |
# -*- coding: utf-8 -*-
#
# Copyright 2014 ko-zu <causeless@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import os
import time
from email.utils import parsedate
from publicsuffixlist import PSLFILE, PSLURL, PublicSuffixList
try:
import requests
except ImportError:
requests = None
if __name__ == "__main__":
updatePSL()
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
apply
|
python
|
def apply(diff, recs, strict=True):
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
|
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L106-L116
|
[
"def index(record_seq: Iterator[Record], index_columns: List[str]) -> Index:\n if not index_columns:\n raise InvalidKeyError('must provide on or more columns to index on')\n\n try:\n obj = {\n tuple(r[i] for i in index_columns): r\n for r in record_seq\n }\n\n return obj\n\n except KeyError as k:\n raise InvalidKeyError('invalid column name {k} as key'.format(k=k))\n",
"def sort(records: Sequence[Record]) -> List[Record]:\n \"Sort records into a canonical order, suitable for comparison.\"\n return sorted(records, key=_record_key)\n",
"def _add_records(indexed, recs_to_add, index_columns, strict=True):\n indexed_to_add = records.index(recs_to_add, index_columns)\n for k, r in indexed_to_add.items():\n if strict and k in indexed:\n error.abort(\n 'error: key {0} already exists in source document'.format(k)\n )\n indexed[k] = r\n",
"def _remove_records(indexed, recs_to_remove, index_columns, strict=True):\n indexed_to_remove = records.index(recs_to_remove, index_columns)\n for k, r in indexed_to_remove.items():\n if strict:\n v = indexed.get(k)\n if v is None:\n error.abort(\n 'ERROR: key {0} does not exist in source '\n 'document'.format(k)\n )\n if v != r:\n error.abort(\n 'ERROR: source document version of {0} has '\n 'changed'.format(k)\n )\n\n del indexed[k]\n",
"def _update_records(indexed, deltas, strict=True):\n for delta in deltas:\n k = tuple(delta['key'])\n field_changes = delta['fields']\n\n r = indexed.get(k)\n\n # what happens when the record is missing?\n if r is None:\n if strict:\n error.abort(\n 'ERROR: source document is missing record '\n 'for {0}'.format(k)\n )\n continue\n\n r = indexed[k]\n for field, from_to in field_changes.items():\n expected = from_to['from']\n if strict and r.get(field) != expected:\n error.abort(\n 'ERROR: source document version of {0} has '\n 'changed {1} field'.format(k, field)\n )\n r[field] = from_to['to']\n"
] |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The the patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
def create(from_records, to_records, index_columns, ignore_columns=None):
"""
Diff two sets of records, using the index columns as the primary key for
both datasets.
"""
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
load
|
python
|
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
|
Deserialize a patch object.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L175-L187
| null |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The the patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
def create(from_records, to_records, index_columns, ignore_columns=None):
"""
Diff two sets of records, using the index columns as the primary key for
both datasets.
"""
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
save
|
python
|
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
|
Serialize a patch object.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L190-L196
| null |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The the patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
def create(from_records, to_records, index_columns, ignore_columns=None):
"""
Diff two sets of records, using the index columns as the primary key for
both datasets.
"""
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
create
|
python
|
def create(from_records, to_records, index_columns, ignore_columns=None):
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
|
Diff two sets of records, using the index columns as the primary key for
both datasets.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L199-L211
|
[
"def index(record_seq: Iterator[Record], index_columns: List[str]) -> Index:\n if not index_columns:\n raise InvalidKeyError('must provide on or more columns to index on')\n\n try:\n obj = {\n tuple(r[i] for i in index_columns): r\n for r in record_seq\n }\n\n return obj\n\n except KeyError as k:\n raise InvalidKeyError('invalid column name {k} as key'.format(k=k))\n",
"def filter_ignored(index: Index, ignore_columns: List[Column]) -> Index:\n for record in index.values():\n # edit the record in-place\n for column in ignore_columns:\n del record[column]\n\n return index\n",
"def create_indexed(from_indexed, to_indexed, index_columns):\n # examine keys for overlap\n removed, added, shared = _compare_keys(from_indexed, to_indexed)\n\n # check for changed rows\n changed = _compare_rows(from_indexed, to_indexed, shared)\n\n diff = _assemble(removed, added, changed, from_indexed, to_indexed,\n index_columns)\n\n return diff\n"
] |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The the patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
_compare_rows
|
python
|
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
|
Return the set of keys which have changed.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L236-L241
| null |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The the patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
def create(from_records, to_records, index_columns, ignore_columns=None):
"""
Diff two sets of records, using the index columns as the primary key for
both datasets.
"""
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
record_diff
|
python
|
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
|
Diff an individual row.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L260-L269
| null |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The the patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
def create(from_records, to_records, index_columns, ignore_columns=None):
"""
Diff two sets of records, using the index columns as the primary key for
both datasets.
"""
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
"""
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
"""
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
filter_significance
|
python
|
def filter_significance(diff, significance):
changed = diff['changed']
# remove individual field changes that are significant
reduced = [{'key': delta['key'],
'fields': {k: v
for k, v in delta['fields'].items()
if _is_significant(v, significance)}}
for delta in changed]
# call a key changed only if it still has significant changes
filtered = [delta for delta in reduced if delta['fields']]
diff = diff.copy()
diff['changed'] = filtered
return diff
|
Prune any changes in the patch which are due to numeric changes less than this level of
significance.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L304-L323
| null |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
    """Return True when the patch encodes no additions, removals or changes."""
    sections = (diff['added'], diff['changed'], diff['removed'])
    return all(not section for section in sections)
def is_valid(diff):
    """
    Validate the diff against the schema, returning True if it matches, False
    otherwise.
    """
    try:
        validate(diff)
        return True
    except jsonschema.ValidationError:
        return False
def validate(diff):
    """
    Check the diff against the schema, raising an exception if it doesn't
    match.
    """
    # Delegates to jsonschema; raises jsonschema.ValidationError on mismatch.
    return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
    """
    Transform the records with the patch. May fail if the records do not
    match those expected in the patch.
    """
    index_columns = diff['_index']
    # Deep-copy so the caller's record dicts are never mutated in place.
    indexed = records.index(copy.deepcopy(list(recs)), index_columns)
    # Order matters: additions first, then removals, then field updates.
    _add_records(indexed, diff['added'], index_columns, strict=strict)
    _remove_records(indexed, diff['removed'], index_columns, strict=strict)
    _update_records(indexed, diff['changed'], strict=strict)
    return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
    """Insert the patch's added records into *indexed*, keyed by index."""
    additions = records.index(recs_to_add, index_columns)
    for key, record in additions.items():
        if strict and key in indexed:
            error.abort(
                'error: key {0} already exists in source document'.format(key)
            )
        indexed[key] = record
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
    """
    Delete the patch's removed records from *indexed*.

    In strict mode, abort when a record to remove is missing from the source
    document or no longer matches the version recorded in the patch.  In
    non-strict mode, tolerate records that are already absent (the original
    ``del indexed[k]`` raised KeyError in that case).
    """
    indexed_to_remove = records.index(recs_to_remove, index_columns)
    for k, r in indexed_to_remove.items():
        if strict:
            v = indexed.get(k)
            if v is None:
                error.abort(
                    'ERROR: key {0} does not exist in source '
                    'document'.format(k)
                )
            if v != r:
                error.abort(
                    'ERROR: source document version of {0} has '
                    'changed'.format(k)
                )
        # pop with a default so a missing key is a no-op when not strict
        indexed.pop(k, None)
def _update_records(indexed, deltas, strict=True):
    """
    Apply per-field changes from the patch to the records in *indexed*.

    In strict mode, abort when a changed record is missing or a field no
    longer holds the 'from' value recorded in the patch; in non-strict mode,
    skip missing records and overwrite fields regardless.
    """
    for delta in deltas:
        k = tuple(delta['key'])
        field_changes = delta['fields']
        r = indexed.get(k)
        if r is None:
            # the record this delta targets is absent from the source
            if strict:
                error.abort(
                    'ERROR: source document is missing record '
                    'for {0}'.format(k)
                )
            continue
        # NOTE: the original re-fetched `r = indexed[k]` here; that was
        # redundant since r already holds that record and is not None.
        for field, from_to in field_changes.items():
            expected = from_to['from']
            if strict and r.get(field) != expected:
                error.abort(
                    'ERROR: source document version of {0} has '
                    'changed {1} field'.format(k, field)
                )
            r[field] = from_to['to']
def load(istream, strict=True):
    "Deserialize a patch object."
    try:
        diff = json.load(istream)
        if strict:
            # schema check only in strict mode; violations are wrapped below
            jsonschema.validate(diff, SCHEMA)
    except ValueError:
        raise InvalidPatchError('patch is not valid JSON')
    except jsonschema.exceptions.ValidationError as e:
        raise InvalidPatchError(e.message)
    return diff
def save(diff, stream=sys.stdout, compact=False):
    """Write the patch as sorted-key JSON to *stream*; pretty-print unless compact."""
    indent = None if compact else 2
    json.dump(diff, stream, sort_keys=True, indent=indent)
def create(from_records, to_records, index_columns, ignore_columns=None):
    """
    Diff two sets of records, using the index columns as the primary key for
    both datasets.
    """
    indexed_from = records.index(from_records, index_columns)
    indexed_to = records.index(to_records, index_columns)
    if ignore_columns is not None:
        # drop ignored columns from both sides before comparing
        indexed_from = records.filter_ignored(indexed_from, ignore_columns)
        indexed_to = records.filter_ignored(indexed_to, ignore_columns)
    return create_indexed(indexed_from, indexed_to, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
    """Build a patch from two already-indexed record sets."""
    # classify keys, then find which shared keys actually changed
    removed, added, shared = _compare_keys(from_indexed, to_indexed)
    changed = _compare_rows(from_indexed, to_indexed, shared)
    return _assemble(removed, added, changed, from_indexed, to_indexed,
                     index_columns)
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
    # Build the final patch dict in the documented schema shape.
    diff = {}
    diff['_index'] = index_columns
    diff['added'] = records.sort(to_recs[k] for k in added)
    diff['removed'] = records.sort(from_recs[k] for k in removed)
    # Changed entries are sorted by key tuple so the patch is deterministic.
    diff['changed'] = sorted(({'key': list(k),
                               'fields': record_diff(from_recs[k], to_recs[k])}
                              for k in changed),
                             key=_change_key)
    return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
    """
    Diff an individual row, returning {field: {'from': ..., 'to': ...}} for
    every field whose value differs.

    Fields present in only one of the two records are reported with None on
    the missing side.  (The original iterated the key union but indexed both
    dicts directly, raising KeyError for asymmetric field sets.)
    """
    delta = {}
    for k in set(lhs).union(rhs):
        from_ = lhs.get(k)
        to_ = rhs.get(k)
        if from_ != to_:
            delta[k] = {'from': from_, 'to': to_}
    return delta
def is_typed(diff):
    """Return True if any value in the diff is not a plain string."""
    # isinstance is the idiomatic type test (also accepts str subclasses)
    return any(not isinstance(v, str) for v in _iter_fields(diff))
def _iter_fields(diff):
    # Yield every data value in the patch: fields of added and removed
    # records, plus change keys and both sides of each field change.
    return itertools.chain(
        _iter_record_fields(diff['added']),
        _iter_record_fields(diff['removed']),
        _iter_change_fields(diff['changed']),
    )
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
    """Raised when a patch document is not valid JSON or fails the schema."""
    pass
def _is_significant(change, significance):
"""
Return True if a change is genuinely significant given our tolerance.
"""
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
larsyencken/csvdiff
|
csvdiff/patch.py
|
_is_significant
|
python
|
def _is_significant(change, significance):
try:
a = float(change['from'])
b = float(change['to'])
except ValueError:
return True
return abs(a - b) > 10 ** (-significance)
|
Return True if a change is genuinely significant given our tolerance.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/patch.py#L326-L337
| null |
# -*- coding: utf-8 -*-
#
# patch.py
# csvdiff
#
"""
The patch format.
"""
import sys
import json
import copy
import itertools
import jsonschema
from . import records
from . import error
SCHEMA = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'csvdiff',
'description': 'The patch format used by csvdiff.',
'type': 'object',
'properties': {
'_index': {
'type': 'array',
'minItems': 1,
'items': {'type': 'string'},
},
'added': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'removed': {
'type': 'array',
'items': {'type': 'object',
'patternProperties': {
'^.*$': {'type': ['string', 'number']},
}},
},
'changed': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'key': {'type': 'array',
'items': {'type': ['string', 'number']},
'minItems': 1},
'fields': {
'type': 'object',
'minProperties': 1,
'patternProperties': {
'^.*$': {'type': 'object',
'properties': {
'from': {
'type': ['string', 'number']
},
'to': {
'type': ['string', 'number']
},
},
'required': ['from', 'to']},
},
},
},
'required': ['key', 'fields'],
},
},
},
'required': ['_index', 'added', 'changed', 'removed'],
}
def is_empty(diff):
"Are there any actual differences encoded in the delta?"
return not any([diff['added'], diff['changed'], diff['removed']])
def is_valid(diff):
"""
Validate the diff against the schema, returning True if it matches, False
otherwise.
"""
try:
validate(diff)
except jsonschema.ValidationError:
return False
return True
def validate(diff):
"""
Check the diff against the schema, raising an exception if it doesn't
match.
"""
return jsonschema.validate(diff, SCHEMA)
def apply(diff, recs, strict=True):
"""
Transform the records with the patch. May fail if the records do not
match those expected in the patch.
"""
index_columns = diff['_index']
indexed = records.index(copy.deepcopy(list(recs)), index_columns)
_add_records(indexed, diff['added'], index_columns, strict=strict)
_remove_records(indexed, diff['removed'], index_columns, strict=strict)
_update_records(indexed, diff['changed'], strict=strict)
return records.sort(indexed.values())
def _add_records(indexed, recs_to_add, index_columns, strict=True):
indexed_to_add = records.index(recs_to_add, index_columns)
for k, r in indexed_to_add.items():
if strict and k in indexed:
error.abort(
'error: key {0} already exists in source document'.format(k)
)
indexed[k] = r
def _remove_records(indexed, recs_to_remove, index_columns, strict=True):
indexed_to_remove = records.index(recs_to_remove, index_columns)
for k, r in indexed_to_remove.items():
if strict:
v = indexed.get(k)
if v is None:
error.abort(
'ERROR: key {0} does not exist in source '
'document'.format(k)
)
if v != r:
error.abort(
'ERROR: source document version of {0} has '
'changed'.format(k)
)
del indexed[k]
def _update_records(indexed, deltas, strict=True):
for delta in deltas:
k = tuple(delta['key'])
field_changes = delta['fields']
r = indexed.get(k)
# what happens when the record is missing?
if r is None:
if strict:
error.abort(
'ERROR: source document is missing record '
'for {0}'.format(k)
)
continue
r = indexed[k]
for field, from_to in field_changes.items():
expected = from_to['from']
if strict and r.get(field) != expected:
error.abort(
'ERROR: source document version of {0} has '
'changed {1} field'.format(k, field)
)
r[field] = from_to['to']
def load(istream, strict=True):
"Deserialize a patch object."
try:
diff = json.load(istream)
if strict:
jsonschema.validate(diff, SCHEMA)
except ValueError:
raise InvalidPatchError('patch is not valid JSON')
except jsonschema.exceptions.ValidationError as e:
raise InvalidPatchError(e.message)
return diff
def save(diff, stream=sys.stdout, compact=False):
"Serialize a patch object."
flags = {'sort_keys': True}
if not compact:
flags['indent'] = 2
json.dump(diff, stream, **flags)
def create(from_records, to_records, index_columns, ignore_columns=None):
"""
Diff two sets of records, using the index columns as the primary key for
both datasets.
"""
from_indexed = records.index(from_records, index_columns)
to_indexed = records.index(to_records, index_columns)
if ignore_columns is not None:
from_indexed = records.filter_ignored(from_indexed, ignore_columns)
to_indexed = records.filter_ignored(to_indexed, ignore_columns)
return create_indexed(from_indexed, to_indexed, index_columns)
def create_indexed(from_indexed, to_indexed, index_columns):
# examine keys for overlap
removed, added, shared = _compare_keys(from_indexed, to_indexed)
# check for changed rows
changed = _compare_rows(from_indexed, to_indexed, shared)
diff = _assemble(removed, added, changed, from_indexed, to_indexed,
index_columns)
return diff
def _compare_keys(from_recs, to_recs):
from_keys = set(from_recs)
to_keys = set(to_recs)
removed = from_keys.difference(to_keys)
shared = from_keys.intersection(to_keys)
added = to_keys.difference(from_keys)
return removed, added, shared
def _compare_rows(from_recs, to_recs, keys):
"Return the set of keys which have changed."
return set(
k for k in keys
if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
)
def _assemble(removed, added, changed, from_recs, to_recs, index_columns):
diff = {}
diff['_index'] = index_columns
diff['added'] = records.sort(to_recs[k] for k in added)
diff['removed'] = records.sort(from_recs[k] for k in removed)
diff['changed'] = sorted(({'key': list(k),
'fields': record_diff(from_recs[k], to_recs[k])}
for k in changed),
key=_change_key)
return diff
def _change_key(c):
return tuple(c['key'])
def record_diff(lhs, rhs):
"Diff an individual row."
delta = {}
for k in set(lhs).union(rhs):
from_ = lhs[k]
to_ = rhs[k]
if from_ != to_:
delta[k] = {'from': from_, 'to': to_}
return delta
def is_typed(diff):
"Are any of the values in the diff typed?"
return any(type(v) != str for v in _iter_fields(diff))
def _iter_fields(diff):
return itertools.chain(
_iter_record_fields(diff['added']),
_iter_record_fields(diff['removed']),
_iter_change_fields(diff['changed']),
)
def _iter_change_fields(cs):
for c in cs:
for k in c['key']:
yield k
for v in c['fields'].values():
yield v['from']
yield v['to']
def _iter_record_fields(recs):
for r in recs:
for v in r.values():
yield v
class InvalidPatchError(Exception):
pass
def filter_significance(diff, significance):
    """
    Prune any changes in the patch which are due to numeric changes less than this level of
    significance.
    """
    changed = diff['changed']
    # keep only the field changes that are individually significant
    reduced = [{'key': delta['key'],
                'fields': {k: v
                           for k, v in delta['fields'].items()
                           if _is_significant(v, significance)}}
               for delta in changed]
    # call a key changed only if it still has significant changes
    filtered = [delta for delta in reduced if delta['fields']]
    diff = diff.copy()
    diff['changed'] = filtered
    return diff
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
diff_files
|
python
|
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
|
Diff two CSV files, returning the patch which transforms one into the
other.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L28-L38
|
[
"def load(file_or_stream: Any, sep: str = ',') -> SafeDictReader:\n istream = (open(file_or_stream)\n if not hasattr(file_or_stream, 'read')\n else file_or_stream)\n return SafeDictReader(istream, sep=sep)\n",
"def create(from_records, to_records, index_columns, ignore_columns=None):\n \"\"\"\n Diff two sets of records, using the index columns as the primary key for\n both datasets.\n \"\"\"\n from_indexed = records.index(from_records, index_columns)\n to_indexed = records.index(to_records, index_columns)\n\n if ignore_columns is not None:\n from_indexed = records.filter_ignored(from_indexed, ignore_columns)\n to_indexed = records.filter_ignored(to_indexed, ignore_columns)\n\n return create_indexed(from_indexed, to_indexed, index_columns)\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
"""
Apply the patch to the source CSV file, and save the result to the target
file.
"""
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
"""
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
"""
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=False, sep=',', ignored_columns=None,
significance=None):
diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
patch.save(diff, ostream, compact=compact)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
"""
Apply the changes from a csvdiff patch to an existing CSV file.
"""
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
patch_file
|
python
|
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
|
Apply the patch to the source CSV file, and save the result to the target
file.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L49-L70
|
[
"def save(records: Sequence[Record], fieldnames: List[Column], ostream: TextIO):\n writer = csv.DictWriter(ostream, fieldnames)\n writer.writeheader()\n for r in records:\n writer.writerow(r)\n",
"def load(istream, strict=True):\n \"Deserialize a patch object.\"\n try:\n diff = json.load(istream)\n if strict:\n jsonschema.validate(diff, SCHEMA)\n except ValueError:\n raise InvalidPatchError('patch is not valid JSON')\n\n except jsonschema.exceptions.ValidationError as e:\n raise InvalidPatchError(e.message)\n\n return diff\n",
"def load(file_or_stream: Any, sep: str = ',') -> SafeDictReader:\n istream = (open(file_or_stream)\n if not hasattr(file_or_stream, 'read')\n else file_or_stream)\n return SafeDictReader(istream, sep=sep)\n",
"def apply(diff, recs, strict=True):\n \"\"\"\n Transform the records with the patch. May fail if the records do not\n match those expected in the patch.\n \"\"\"\n index_columns = diff['_index']\n indexed = records.index(copy.deepcopy(list(recs)), index_columns)\n _add_records(indexed, diff['added'], index_columns, strict=strict)\n _remove_records(indexed, diff['removed'], index_columns, strict=strict)\n _update_records(indexed, diff['changed'], strict=strict)\n return records.sort(indexed.values())\n",
"def _nice_fieldnames(all_columns, index_columns):\n \"Indexes on the left, other fields in alphabetical order on the right.\"\n non_index_columns = set(all_columns).difference(index_columns)\n return index_columns + sorted(non_index_columns)\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
"""
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
"""
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=False, sep=',', ignored_columns=None,
significance=None):
diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
patch.save(diff, ostream, compact=compact)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
"""
Apply the changes from a csvdiff patch to an existing CSV file.
"""
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
patch_records
|
python
|
def patch_records(diff, from_records, strict=True):
return patch.apply(diff, from_records, strict=strict)
|
Apply the patch to the sequence of records, returning the transformed
records.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L73-L78
|
[
"def apply(diff, recs, strict=True):\n \"\"\"\n Transform the records with the patch. May fail if the records do not\n match those expected in the patch.\n \"\"\"\n index_columns = diff['_index']\n indexed = records.index(copy.deepcopy(list(recs)), index_columns)\n _add_records(indexed, diff['added'], index_columns, strict=strict)\n _remove_records(indexed, diff['removed'], index_columns, strict=strict)\n _update_records(indexed, diff['changed'], strict=strict)\n return records.sort(indexed.values())\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
"""
Apply the patch to the source CSV file, and save the result to the target
file.
"""
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
"""
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
"""
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=False, sep=',', ignored_columns=None,
significance=None):
diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
patch.save(diff, ostream, compact=compact)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
"""
Apply the changes from a csvdiff patch to an existing CSV file.
"""
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
_nice_fieldnames
|
python
|
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
|
Indexes on the left, other fields in alphabetical order on the right.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L81-L84
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
"""
Apply the patch to the source CSV file, and save the result to the target
file.
"""
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
"""
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
"""
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=False, sep=',', ignored_columns=None,
significance=None):
diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
patch.save(diff, ostream, compact=compact)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
"""
Apply the changes from a csvdiff patch to an existing CSV file.
"""
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
csvdiff_cmd
|
python
|
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
|
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L128-L160
|
[
"def abort(message=None):\n if DEBUG:\n raise FatalError(message)\n\n print('ERROR: {0}'.format(message), file=sys.stderr)\n sys.exit(2)\n",
"def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,\n sep=',', ignored_columns=None, significance=None):\n \"\"\"\n Print a summary of the difference between the two files.\n \"\"\"\n from_records = list(records.load(from_csv, sep=sep))\n to_records = records.load(to_csv, sep=sep)\n\n diff = patch.create(from_records, to_records, index_columns, ignored_columns)\n if significance is not None:\n diff = patch.filter_significance(diff, significance)\n\n _summarize_diff(diff, len(from_records), stream=stream)\n exit_code = (EXIT_SAME\n if patch.is_empty(diff)\n else EXIT_DIFFERENT)\n sys.exit(exit_code)\n",
"def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,\n compact=False, sep=',', ignored_columns=None,\n significance=None):\n diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)\n\n if significance is not None:\n diff = patch.filter_significance(diff, significance)\n\n patch.save(diff, ostream, compact=compact)\n exit_code = (EXIT_SAME\n if patch.is_empty(diff)\n else EXIT_DIFFERENT)\n sys.exit(exit_code)\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
"""
Apply the patch to the source CSV file, and save the result to the target
file.
"""
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=False, sep=',', ignored_columns=None,
significance=None):
diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
patch.save(diff, ostream, compact=compact)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
"""
Print a summary of the difference between the two files.
"""
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
"""
Apply the changes from a csvdiff patch to an existing CSV file.
"""
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
_diff_and_summarize
|
python
|
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
sep=',', ignored_columns=None, significance=None):
from_records = list(records.load(from_csv, sep=sep))
to_records = records.load(to_csv, sep=sep)
diff = patch.create(from_records, to_records, index_columns, ignored_columns)
if significance is not None:
diff = patch.filter_significance(diff, significance)
_summarize_diff(diff, len(from_records), stream=stream)
exit_code = (EXIT_SAME
if patch.is_empty(diff)
else EXIT_DIFFERENT)
sys.exit(exit_code)
|
Print a summary of the difference between the two files.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L178-L194
|
[
"def load(file_or_stream: Any, sep: str = ',') -> SafeDictReader:\n istream = (open(file_or_stream)\n if not hasattr(file_or_stream, 'read')\n else file_or_stream)\n return SafeDictReader(istream, sep=sep)\n",
"def create(from_records, to_records, index_columns, ignore_columns=None):\n \"\"\"\n Diff two sets of records, using the index columns as the primary key for\n both datasets.\n \"\"\"\n from_indexed = records.index(from_records, index_columns)\n to_indexed = records.index(to_records, index_columns)\n\n if ignore_columns is not None:\n from_indexed = records.filter_ignored(from_indexed, ignore_columns)\n to_indexed = records.filter_ignored(to_indexed, ignore_columns)\n\n return create_indexed(from_indexed, to_indexed, index_columns)\n",
"def filter_significance(diff, significance):\n \"\"\"\n Prune any changes in the patch which are due to numeric changes less than this level of\n significance.\n \"\"\"\n changed = diff['changed']\n\n # remove individual field changes that are significant\n reduced = [{'key': delta['key'],\n 'fields': {k: v\n for k, v in delta['fields'].items()\n if _is_significant(v, significance)}}\n for delta in changed]\n\n # call a key changed only if it still has significant changes\n filtered = [delta for delta in reduced if delta['fields']]\n\n diff = diff.copy()\n diff['changed'] = filtered\n return diff\n",
"def is_empty(diff):\n \"Are there any actual differences encoded in the delta?\"\n return not any([diff['added'], diff['changed'], diff['removed']])\n",
"def _summarize_diff(diff, orig_size, stream=sys.stdout):\n if orig_size == 0:\n # slightly arbitrary when the original data was empty\n orig_size = 1\n\n n_removed = len(diff['removed'])\n n_added = len(diff['added'])\n n_changed = len(diff['changed'])\n\n if n_removed or n_added or n_changed:\n print(u'%d rows removed (%.01f%%)' % (\n n_removed, 100 * n_removed / orig_size\n ), file=stream)\n print(u'%d rows added (%.01f%%)' % (\n n_added, 100 * n_added / orig_size\n ), file=stream)\n print(u'%d rows changed (%.01f%%)' % (\n n_changed, 100 * n_changed / orig_size\n ), file=stream)\n else:\n print(u'files are identical', file=stream)\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
"""
Diff two CSV files, returning the patch which transforms one into the
other.
"""
with open(from_file) as from_stream:
with open(to_file) as to_stream:
from_records = records.load(from_stream, sep=sep)
to_records = records.load(to_stream, sep=sep)
return patch.create(from_records, to_records, index_columns,
ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
"""
Diff two sequences of dictionary records, returning the patch which
transforms one into the other.
"""
return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
strict: bool = True, sep: str = ','):
"""
Apply the patch to the source CSV file, and save the result to the target
file.
"""
diff = patch.load(patch_stream)
from_records = records.load(fromcsv_stream, sep=sep)
to_records = patch.apply(diff, from_records, strict=strict)
# what order should the columns be in?
if to_records:
# have data, use a nice ordering
all_columns = to_records[0].keys()
index_columns = diff['_index']
fieldnames = _nice_fieldnames(all_columns, index_columns)
else:
# no data, use the original order
fieldnames = from_records.fieldnames
records.save(to_records, fieldnames, tocsv_stream)
def patch_records(diff, from_records, strict=True):
"""
Apply the patch to the sequence of records, returning the transformed
records.
"""
return patch.apply(diff, from_records, strict=strict)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
name = 'csv'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value.split(',')
return value.split(',')
def __repr__(self):
return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
type=click.Choice(['compact', 'pretty', 'summary']),
default='compact',
help=('Instead of the default compact output, pretty-print '
'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
sep=',', quiet=False, ignore_columns=None, significance=None):
"""
Compare two csv files to see what rows differ between them. The files
are each expected to have a header row, and for each row to be uniquely
identified by one or more indexing columns.
"""
if ignore_columns is not None:
for i in ignore_columns:
if i in index_columns:
error.abort("You can't ignore an index column")
ostream = (open(output, 'w') if output
else io.StringIO() if quiet
else sys.stdout)
try:
if style == 'summary':
_diff_and_summarize(from_csv, to_csv, index_columns, ostream,
sep=sep, ignored_columns=ignore_columns,
significance=significance)
else:
compact = (style == 'compact')
_diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
compact=compact, sep=sep, ignored_columns=ignore_columns,
significance=significance)
except records.InvalidKeyError as e:
error.abort(e.args[0])
finally:
ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
                          compact=False, sep=',', ignored_columns=None,
                          significance=None):
    """
    Diff two CSV files, write the resulting patch to ``ostream``, and
    terminate the process: exit status EXIT_SAME when the files match,
    EXIT_DIFFERENT otherwise.
    """
    diff = diff_files(from_csv, to_csv, index_columns, sep=sep, ignored_columns=ignored_columns)
    # Optionally drop numeric changes below the requested number of
    # significant figures.
    if significance is not None:
        diff = patch.filter_significance(diff, significance)
    patch.save(diff, ostream, compact=compact)
    exit_code = (EXIT_SAME
                 if patch.is_empty(diff)
                 else EXIT_DIFFERENT)
    sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
if orig_size == 0:
# slightly arbitrary when the original data was empty
orig_size = 1
n_removed = len(diff['removed'])
n_added = len(diff['added'])
n_changed = len(diff['changed'])
if n_removed or n_added or n_changed:
print(u'%d rows removed (%.01f%%)' % (
n_removed, 100 * n_removed / orig_size
), file=stream)
print(u'%d rows added (%.01f%%)' % (
n_added, 100 * n_added / orig_size
), file=stream)
print(u'%d rows changed (%.01f%%)' % (
n_changed, 100 * n_changed / orig_size
), file=stream)
else:
print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
              help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
              help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
              help='Whether or not to tolerate a changed source document '
              '(default: strict)')
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
    """
    Apply the changes from a csvdiff patch to an existing CSV file.
    """
    # Default to stdin for the patch and stdout for the result when no
    # files are given on the command line.
    patch_stream = (sys.stdin
                    if input is None
                    else open(input))
    tocsv_stream = (sys.stdout
                    if output is None
                    else open(output, 'w'))
    fromcsv_stream = open(input_csv)
    try:
        patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
    except patch.InvalidPatchError as e:
        error.abort('reading patch, {0}'.format(e.args[0]))
    finally:
        # NOTE(review): when the defaults are used this also closes
        # sys.stdin/sys.stdout — harmless at process exit, but confirm.
        patch_stream.close()
        fromcsv_stream.close()
        tocsv_stream.close()
|
larsyencken/csvdiff
|
csvdiff/__init__.py
|
csvpatch_cmd
|
python
|
def csvpatch_cmd(input_csv, input=None, output=None, strict=True):
patch_stream = (sys.stdin
if input is None
else open(input))
tocsv_stream = (sys.stdout
if output is None
else open(output, 'w'))
fromcsv_stream = open(input_csv)
try:
patch_file(patch_stream, fromcsv_stream, tocsv_stream, strict=strict)
except patch.InvalidPatchError as e:
error.abort('reading patch, {0}'.format(e.args[0]))
finally:
patch_stream.close()
fromcsv_stream.close()
tocsv_stream.close()
|
Apply the changes from a csvdiff patch to an existing CSV file.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L229-L250
|
[
"def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,\n strict: bool = True, sep: str = ','):\n \"\"\"\n Apply the patch to the source CSV file, and save the result to the target\n file.\n \"\"\"\n diff = patch.load(patch_stream)\n\n from_records = records.load(fromcsv_stream, sep=sep)\n to_records = patch.apply(diff, from_records, strict=strict)\n\n # what order should the columns be in?\n if to_records:\n # have data, use a nice ordering\n all_columns = to_records[0].keys()\n index_columns = diff['_index']\n fieldnames = _nice_fieldnames(all_columns, index_columns)\n else:\n # no data, use the original order\n fieldnames = from_records.fieldnames\n\n records.save(to_records, fieldnames, tocsv_stream)\n",
"def abort(message=None):\n if DEBUG:\n raise FatalError(message)\n\n print('ERROR: {0}'.format(message), file=sys.stderr)\n sys.exit(2)\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# csvdiff
#
import sys
from typing.io import TextIO
import io
import click
from . import records, patch, error
__author__ = 'Lars Yencken'
__email__ = 'lars@yencken.org'
__version__ = '0.3.1'
# exit codes for the command-line
EXIT_SAME = 0
EXIT_DIFFERENT = 1
EXIT_ERROR = 2
def diff_files(from_file, to_file, index_columns, sep=',', ignored_columns=None):
    """
    Diff two CSV files, returning the patch which transforms one into the
    other.

    :param index_columns: column names which uniquely identify each row
    :param ignored_columns: column names excluded from the comparison
    """
    with open(from_file) as from_stream:
        with open(to_file) as to_stream:
            from_records = records.load(from_stream, sep=sep)
            to_records = records.load(to_stream, sep=sep)
            return patch.create(from_records, to_records, index_columns,
                                ignore_columns=ignored_columns)
def diff_records(from_records, to_records, index_columns):
    """
    Diff two sequences of dictionary records, returning the patch which
    transforms one into the other.

    :param index_columns: column names which uniquely identify each record
    """
    return patch.create(from_records, to_records, index_columns)
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO,
               strict: bool = True, sep: str = ','):
    """
    Apply the patch to the source CSV file, and save the result to the target
    file.

    :param strict: when True, fail if the source document does not match the
        one the patch was generated from.
    """
    diff = patch.load(patch_stream)
    from_records = records.load(fromcsv_stream, sep=sep)
    to_records = patch.apply(diff, from_records, strict=strict)
    # what order should the columns be in?
    if to_records:
        # have data, use a nice ordering: index columns first, the rest
        # alphabetical
        all_columns = to_records[0].keys()
        index_columns = diff['_index']
        fieldnames = _nice_fieldnames(all_columns, index_columns)
    else:
        # no data, use the original order
        fieldnames = from_records.fieldnames
    records.save(to_records, fieldnames, tocsv_stream)
def patch_records(diff, from_records, strict=True):
    """
    Apply the patch to the sequence of records, returning the transformed
    records.

    :param strict: when True, fail if the records do not match the source
        the patch was generated from.
    """
    return patch.apply(diff, from_records, strict=strict)
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
class CSVType(click.ParamType):
    """Click parameter type for a comma-separated list of names."""
    name = 'csv'
    def convert(self, value, param, ctx):
        # Command-line values may arrive as bytes; decode with the best
        # available encoding (stdin's, then the filesystem's, then UTF-8
        # with replacement) before splitting on commas.
        if isinstance(value, bytes):
            try:
                enc = getattr(sys.stdin, 'encoding', None)
                if enc is not None:
                    value = value.decode(enc)
            except UnicodeError:
                try:
                    value = value.decode(sys.getfilesystemencoding())
                except UnicodeError:
                    value = value.decode('utf-8', 'replace')
            return value.split(',')
        return value.split(',')
    def __repr__(self):
        # Name click displays when describing this parameter type.
        return 'CSV'
@click.command()
@click.argument('index_columns', type=CSVType())
@click.argument('from_csv', type=click.Path(exists=True))
@click.argument('to_csv', type=click.Path(exists=True))
@click.option('--style',
              type=click.Choice(['compact', 'pretty', 'summary']),
              default='compact',
              help=('Instead of the default compact output, pretty-print '
                    'or give a summary instead'))
@click.option('--output', '-o', type=click.Path(),
              help='Output to a file instead of stdout')
@click.option('--quiet', '-q', is_flag=True,
              help="Don't output anything, just use exit codes")
@click.option('--sep', default=',',
              help='Separator to use between fields [default: comma]')
@click.option('--ignore-columns', '-i', type=CSVType(),
              help='a comma seperated list of columns to ignore from the comparison')
@click.option('--significance', type=int,
              help='Ignore numeric changes less than this number of significant figures')
def csvdiff_cmd(index_columns, from_csv, to_csv, style=None, output=None,
                sep=',', quiet=False, ignore_columns=None, significance=None):
    """
    Compare two csv files to see what rows differ between them. The files
    are each expected to have a header row, and for each row to be uniquely
    identified by one or more indexing columns.
    """
    # An index column cannot be ignored: it is needed to pair rows up.
    if ignore_columns is not None:
        for i in ignore_columns:
            if i in index_columns:
                error.abort("You can't ignore an index column")
    # --quiet discards output into an in-memory buffer so only the exit
    # code remains observable; --output redirects to a file.
    ostream = (open(output, 'w') if output
               else io.StringIO() if quiet
               else sys.stdout)
    try:
        if style == 'summary':
            _diff_and_summarize(from_csv, to_csv, index_columns, ostream,
                                sep=sep, ignored_columns=ignore_columns,
                                significance=significance)
        else:
            compact = (style == 'compact')
            _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
                                  compact=compact, sep=sep, ignored_columns=ignore_columns,
                                  significance=significance)
    except records.InvalidKeyError as e:
        error.abort(e.args[0])
    finally:
        # NOTE(review): this also closes sys.stdout when neither --output
        # nor --quiet is given — confirm that is intended.
        ostream.close()
def _diff_files_to_stream(from_csv, to_csv, index_columns, ostream,
                          compact=False, sep=',', ignored_columns=None,
                          significance=None):
    """Diff two CSV files, write the patch to *ostream*, and exit.

    The process terminates with EXIT_SAME when the files match and
    EXIT_DIFFERENT otherwise.
    """
    diff = diff_files(from_csv, to_csv, index_columns, sep=sep,
                      ignored_columns=ignored_columns)
    # Optionally drop numeric changes below the significance threshold.
    if significance is not None:
        diff = patch.filter_significance(diff, significance)
    patch.save(diff, ostream, compact=compact)
    if patch.is_empty(diff):
        sys.exit(EXIT_SAME)
    sys.exit(EXIT_DIFFERENT)
def _diff_and_summarize(from_csv, to_csv, index_columns, stream=sys.stdout,
                        sep=',', ignored_columns=None, significance=None):
    """
    Print a summary of the difference between the two files.

    Exits the process with EXIT_SAME or EXIT_DIFFERENT.
    """
    # Materialize the original rows so they can be counted for percentages.
    from_records = list(records.load(from_csv, sep=sep))
    to_records = records.load(to_csv, sep=sep)
    # NOTE(review): ignored_columns is passed positionally here, while
    # diff_files passes it as ignore_columns= — confirm patch.create's
    # fourth positional parameter is indeed the ignore list.
    diff = patch.create(from_records, to_records, index_columns, ignored_columns)
    if significance is not None:
        diff = patch.filter_significance(diff, significance)
    _summarize_diff(diff, len(from_records), stream=stream)
    exit_code = (EXIT_SAME
                 if patch.is_empty(diff)
                 else EXIT_DIFFERENT)
    sys.exit(exit_code)
def _summarize_diff(diff, orig_size, stream=sys.stdout):
    """Write a human-readable summary of ``diff`` to ``stream``; percentages
    are relative to ``orig_size``, the row count of the original file."""
    if orig_size == 0:
        # slightly arbitrary when the original data was empty
        orig_size = 1
    n_removed = len(diff['removed'])
    n_added = len(diff['added'])
    n_changed = len(diff['changed'])
    if n_removed or n_added or n_changed:
        print(u'%d rows removed (%.01f%%)' % (
            n_removed, 100 * n_removed / orig_size
        ), file=stream)
        print(u'%d rows added (%.01f%%)' % (
            n_added, 100 * n_added / orig_size
        ), file=stream)
        print(u'%d rows changed (%.01f%%)' % (
            n_changed, 100 * n_changed / orig_size
        ), file=stream)
    else:
        print(u'files are identical', file=stream)
@click.command()
@click.argument('input_csv', type=click.Path(exists=True))
@click.option('--input', '-i', type=click.Path(exists=True),
help='Read the JSON patch from the given file.')
@click.option('--output', '-o', type=click.Path(),
help='Write the transformed CSV to the given file.')
@click.option('--strict/--no-strict', default=True,
help='Whether or not to tolerate a changed source document '
'(default: strict)')
|
larsyencken/csvdiff
|
csvdiff/records.py
|
sort
|
python
|
def sort(records: Sequence[Record]) -> List[Record]:
"Sort records into a canonical order, suitable for comparison."
return sorted(records, key=_record_key)
|
Sort records into a canonical order, suitable for comparison.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/records.py#L86-L88
| null |
# -*- coding: utf-8 -*-
#
# records.py
# csvdiff
#
from typing.io import TextIO
from typing import Any, Dict, Tuple, Iterator, List, Sequence
import csv
import sys
from . import error
Column = str
PrimaryKey = Tuple[str, ...]
Record = Dict[Column, Any]
Index = Dict[PrimaryKey, Record]
class InvalidKeyError(Exception):
    """Raised when the requested index columns cannot key the records."""
    pass
class SafeDictReader:
    """
    A CSV reader that streams records but gives nice errors if lines fail to parse.
    """
    def __init__(self, istream: TextIO, sep: str = ',') -> None:
        # bump the built-in limits on field sizes
        csv.field_size_limit(2**24)
        self.reader = csv.DictReader(istream, delimiter=sep)
    def __iter__(self) -> Iterator[Record]:
        # Count from 2: line 1 of the file is the header row.
        for lineno, r in enumerate(self.reader, 2):
            # DictReader uses a None key when a row has more fields than
            # the header — treat that as a parse error.
            if any(k is None for k in r):
                error.abort('CSV parse error on line {}'.format(lineno))
            yield dict(r)
    @property
    def fieldnames(self):
        # NOTE(review): reads DictReader's private _fieldnames, which skips
        # the lazy header fetch done by the public property — confirm this
        # is intentional.
        return self.reader._fieldnames
def load(file_or_stream: Any, sep: str = ',') -> SafeDictReader:
    """Open ``file_or_stream`` (a path or a readable object) as a stream of
    CSV records."""
    # Duck-type: anything with .read() is treated as an already-open stream.
    istream = (open(file_or_stream)
               if not hasattr(file_or_stream, 'read')
               else file_or_stream)
    return SafeDictReader(istream, sep=sep)
def index(record_seq: Iterator[Record], index_columns: List[str]) -> Index:
    """Index an iterable of records by the given key columns.

    :raises InvalidKeyError: if no key columns are given, or if a record
        is missing one of the key columns.
    """
    if not index_columns:
        # BUG FIX: message previously read "on or more".
        raise InvalidKeyError('must provide one or more columns to index on')
    try:
        # Map the tuple of key values to the full record.
        return {
            tuple(r[i] for i in index_columns): r
            for r in record_seq
        }
    except KeyError as k:
        raise InvalidKeyError('invalid column name {k} as key'.format(k=k))
def filter_ignored(index: Index, ignore_columns: List[Column]) -> Index:
    """Remove the ignored columns from every record in ``index``.

    Mutates the records in place and returns the same index.

    :raises KeyError: if a record lacks one of the ignored columns.
    """
    for record in index.values():
        # edit the record in-place
        for column in ignore_columns:
            del record[column]
    return index
def save(records: Sequence[Record], fieldnames: List[Column], ostream: TextIO):
    """Write ``records`` to ``ostream`` as CSV with a header row, using the
    column order given by ``fieldnames``."""
    writer = csv.DictWriter(ostream, fieldnames)
    writer.writeheader()
    for r in records:
        writer.writerow(r)
def _record_key(record: Record) -> List[Tuple[Column, str]]:
    "An orderable representation of this record."
    # Sorting the items yields a deterministic key regardless of dict order.
    return sorted(record.items())
|
larsyencken/csvdiff
|
csvdiff/records.py
|
_record_key
|
python
|
def _record_key(record: Record) -> List[Tuple[Column, str]]:
"An orderable representation of this record."
return sorted(record.items())
|
An orderable representation of this record.
|
train
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/records.py#L91-L93
| null |
# -*- coding: utf-8 -*-
#
# records.py
# csvdiff
#
from typing.io import TextIO
from typing import Any, Dict, Tuple, Iterator, List, Sequence
import csv
import sys
from . import error
Column = str
PrimaryKey = Tuple[str, ...]
Record = Dict[Column, Any]
Index = Dict[PrimaryKey, Record]
class InvalidKeyError(Exception):
    """Raised when the requested index columns cannot key the records."""
    pass
class SafeDictReader:
    """
    A CSV reader that streams records but gives nice errors if lines fail to parse.
    """
    def __init__(self, istream: TextIO, sep: str = ',') -> None:
        # bump the built-in limits on field sizes
        csv.field_size_limit(2**24)
        self.reader = csv.DictReader(istream, delimiter=sep)
    def __iter__(self) -> Iterator[Record]:
        # Count from 2: line 1 of the file is the header row.
        for lineno, r in enumerate(self.reader, 2):
            # DictReader uses a None key when a row has more fields than
            # the header — treat that as a parse error.
            if any(k is None for k in r):
                error.abort('CSV parse error on line {}'.format(lineno))
            yield dict(r)
    @property
    def fieldnames(self):
        # NOTE(review): reads DictReader's private _fieldnames, which skips
        # the lazy header fetch done by the public property — confirm this
        # is intentional.
        return self.reader._fieldnames
def load(file_or_stream: Any, sep: str = ',') -> SafeDictReader:
    """Open ``file_or_stream`` (a path or a readable object) as a stream of
    CSV records."""
    # Duck-type: anything with .read() is treated as an already-open stream.
    istream = (open(file_or_stream)
               if not hasattr(file_or_stream, 'read')
               else file_or_stream)
    return SafeDictReader(istream, sep=sep)
def index(record_seq: Iterator[Record], index_columns: List[str]) -> Index:
    """Index an iterable of records by the given key columns.

    :raises InvalidKeyError: if no key columns are given, or if a record
        is missing one of the key columns.
    """
    if not index_columns:
        # BUG FIX: message previously read "on or more".
        raise InvalidKeyError('must provide one or more columns to index on')
    try:
        # Map the tuple of key values to the full record.
        return {
            tuple(r[i] for i in index_columns): r
            for r in record_seq
        }
    except KeyError as k:
        raise InvalidKeyError('invalid column name {k} as key'.format(k=k))
def filter_ignored(index: Index, ignore_columns: List[Column]) -> Index:
    """Remove the ignored columns from every record in ``index``.

    Mutates the records in place and returns the same index.

    :raises KeyError: if a record lacks one of the ignored columns.
    """
    for record in index.values():
        # edit the record in-place
        for column in ignore_columns:
            del record[column]
    return index
def save(records: Sequence[Record], fieldnames: List[Column], ostream: TextIO):
    """Write ``records`` to ``ostream`` as CSV with a header row, using the
    column order given by ``fieldnames``."""
    writer = csv.DictWriter(ostream, fieldnames)
    writer.writeheader()
    for r in records:
        writer.writerow(r)
def sort(records: Sequence[Record]) -> List[Record]:
    "Sort records into a canonical order, suitable for comparison."
    # _record_key sorts each record's items for a deterministic ordering.
    return sorted(records, key=_record_key)
|
rhelmot/nclib
|
nclib/process.py
|
Process.launch
|
python
|
def launch(program, sock, stderr=True, cwd=None, env=None):
if stderr is True:
err = sock # redirect to socket
elif stderr is False:
err = open(os.devnull, 'wb') # hide
elif stderr is None:
err = None # redirect to console
p = subprocess.Popen(program,
shell=type(program) not in (list, tuple),
stdin=sock, stdout=sock, stderr=err,
cwd=cwd, env=env,
close_fds=True)
sock.close()
return p
|
A static method for launching a process that is connected to a given
socket. Same rules from the Process constructor apply.
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/process.py#L85-L104
| null |
class Process(Netcat):
"""
A mechanism for launching a local process and interacting with it
programatically. This class is a subclass of the basic `Netcat` object so
you may use any method from that class to interact with the process you've
launched!
:param program: The program to launch. Can be either a list of strings,
in which case those strings will become the program
argv, or a single string, in which case the shell will
be used to launch the program.
:param stderr: How the program's stderr stream should behave. True
(default) will redirect stderr to the output socket,
unifying it with stdout. False will redirect it to
/dev/null. None will not touch it, causing it to appear
on your terminal.
:param cwd: The working directory to execute the program in
:param env: The environment to execute the program in, as a
dictionary
:param protocol: The socket protocol to use. 'tcp' by default, can also
be 'udp'
Any additional keyword arguments will be passed to the constructor of
Netcat.
WARNING: If you provide a string and not a list as the description for the
program to launch, then the pid we know about will be associated with the
shell that launches the program, not the program itself.
*Example:* Launch the `cat` process and send it a greeting. Print out its
response. Close the socket and the process exits with status 0.
>>> from nclib import Process
>>> cat = Process('cat')
>>> cat.send('Hello world!')
>>> print(cat.recv())
b'Hello world!'
>>> cat.close()
>>> print(cat.poll())
0
"""
    def __init__(self, program,
            protocol='tcp',
            stderr=True,
            cwd=None,
            env=None,
            **kwargs):
        # A socketpair gives one end to the child process (y) and keeps the
        # other (x) for this object to talk to it.
        x, y = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM if protocol == 'tcp' else socket.SOCK_DGRAM)
        self._subprocess = self.launch(program, y, stderr=stderr, cwd=cwd, env=env)
        self.pid = self._subprocess.pid
        super(Process, self).__init__(sock=x, server='local program %s' % program, **kwargs)
    def poll(self):
        """
        Return the exit code of the process, or None if it has not exited.
        Does not block.
        """
        return self._subprocess.poll()
    def wait(self):
        """
        Block until the process exits and return its exit code.
        """
        return self._subprocess.wait()
    def send_signal(self, sig):
        """
        Send the signal ``sig`` (e.g. ``signal.SIGTERM``) to the process.
        """
        return self._subprocess.send_signal(sig)
    def kill(self):
        """
        Forcibly terminate the process.
        """
        return self._subprocess.kill()
@staticmethod
|
rhelmot/nclib
|
nclib/server.py
|
UDPServer.respond
|
python
|
def respond(self, packet, peer, flags=0):
self.sock.sendto(packet, flags, peer)
|
Send a message back to a peer.
:param packet: The data to send
:param peer: The address to send to, as a tuple (host, port)
:param flags: Any sending flags you want to use for some reason
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/server.py#L75-L83
| null |
class UDPServer(object):
    """
    A simple UDP server model. Iterating over it will yield of tuples of
    datagrams and peer addresses. To respond, use the respond method, which
    takes the response and the peer address.

    :param bindto: The address to bind to, a tuple (host, port)
    :param dgram_size: The size of the datagram to receive. This is
                       important! If you send a message longer than the
                       receiver's receiving size, the rest of the message
                       will be silently lost! Default is 4096.

    Here is a simple echo server example:

    >>> from nclib import UDPServer
    >>> server = UDPServer(('0.0.0.0', 1337))
    >>> for message, peer in server:
    ...     server.respond(message, peer)
    """
    def __init__(self, bindto, dgram_size=4096):
        self.addr = bindto
        self.dgram_size = dgram_size
        self.sock = socket.socket(type=socket.SOCK_DGRAM)
        # BUG FIX: the socket was never bound, so iteration would receive
        # on an OS-assigned ephemeral port instead of `bindto` as the
        # docstring promises.
        self.sock.bind(bindto)

    def __iter__(self):
        # Yield (datagram, peer_address) tuples forever.
        while True:
            packet, peer = self.sock.recvfrom(self.dgram_size)
            yield packet, peer

    def respond(self, packet, peer, flags=0):
        """
        Send a message back to a peer.

        :param packet: The data to send
        :param peer: The address to send to, as a tuple (host, port)
        :param flags: Any sending flags you want to use for some reason
        """
        # BUG FIX: this method is referenced by the class docstring but was
        # missing from the class body.
        self.sock.sendto(packet, flags, peer)

    def close(self):
        """
        Tear down this server and release its resources
        """
        return self.sock.close()
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat._parse_target
|
python
|
def _parse_target(target, listen, udp, ipv6):
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
|
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L232-L361
|
[
"def _is_ipv6_addr(addr):\n try:\n socket.inet_pton(socket.AF_INET6, addr)\n except socket.error:\n return False\n else:\n return True\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
    def __init__(self,
            connect=None,
            sock=None,
            listen=None,
            server=None,
            sock_send=None,
            udp=False,
            ipv6=False,
            verbose=0,
            log_send=None,
            log_recv=None,
            raise_timeout=False,
            retry=False,
            log_yield=False):
        # Receive buffer: data read from the socket but not yet yielded.
        self.buf = b''
        self.verbose = verbose
        self.log_send = log_send
        self.log_recv = log_recv
        self.log_yield = log_yield
        # Console-logging knobs; see the class docstring.
        self.echo_headers = True
        self.echo_perline = True
        self.echo_sending = True
        self.echo_recving = True
        self.echo_hex = False
        self.echo_send_prefix = '>> '
        self.echo_recv_prefix = '<< '
        self.sock = None
        self._sock_send = sock_send
        self.peer = None
        # case: Netcat(host, port)
        if isinstance(connect, str) and isinstance(listen, int):
            connect = (connect, listen)
        # case: Netcat(sock)
        if isinstance(connect, socket.socket):
            sock = connect
            connect = None
        # deprecated server kwarg
        if server is not None:
            connect = server
        if sock is None and listen is None and connect is None:
            raise ValueError('Not enough arguments, need at least an '
                    'address or a socket or a listening address!')
        ## we support passing connect as the "name" of the socket
        #if sock is not None and (listen is not None or connect is not None):
        #    raise ValueError("connect or listen arguments may not be "
        #            "provided if sock is provided")
        if listen is not None and connect is not None:
            raise ValueError("connect and listen arguments cannot be provided at the same time")
        if sock is None:
            # No pre-made socket: parse the target and open one ourselves.
            if listen is not None:
                target = listen
                listen = True
            else:
                target = connect
                listen = False
            target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
            self._connect(target, listen, udp, ipv6, retry)
        else:
            # Wrap the provided socket; `connect` here is just a label.
            self.sock = sock
            self.peer = connect
        try:
            self._timeout = self.sock.gettimeout()
        except AttributeError:
            # Pipes and file-like objects have no gettimeout().
            self._timeout = None
        self.timed_out = False # set when an operation times out
        self._raise_timeout = raise_timeout
    @property
    def sock_send(self):
        # In demultiplexed mode (_sock_send set) sends go to the separate
        # channel; otherwise the main socket serves both directions.
        if self._sock_send is None:
            return self.sock
        else:
            return self._sock_send
    @sock_send.setter
    def sock_send(self, val):
        # Assigning establishes (or replaces) the dedicated send channel.
        self._sock_send = val
@staticmethod
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
    def close(self):
        """
        Close the socket.
        """
        # Close the dedicated send channel first, if one exists.
        if self._sock_send is not None:
            self._sock_send.close()
        return self.sock.close()
    # inconsistent between sockets and files. support both
    @property
    def closed(self):
        # Public file-style alias for the _closed probe below.
        return self._closed
    @property
    def _closed(self):
        # File objects expose `closed`, sockets expose `_closed`; probe both
        # and fall back to False for objects exposing neither.
        if hasattr(self.sock_send, 'closed'):
            return self.sock_send.closed
        elif hasattr(self.sock_send, '_closed'):
            return self.sock_send._closed
        else:
            return False # ???
    def shutdown(self, how=socket.SHUT_RDWR):
        """
        Send a shutdown signal for both reading and writing, or whatever
        socket.SHUT_* constant you like.
        Shutdown differs from closing in that it explicitly changes the state of
        the socket resource to closed, whereas closing will only decrement the
        number of peers on this end of the socket, since sockets can be a
        resource shared by multiple peers on a single OS. When the number of
        peers reaches zero, the socket is closed, but not deallocated, so you
        still need to call close. (except that this is python and close is
        automatically called on the deletion of the socket)
        http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
        """
        # In demultiplexed mode, shut down the send channel as well.
        if self._sock_send is not None:
            self._sock_send.shutdown(how)
        return self.sock.shutdown(how)
    def shutdown_rd(self):
        """
        Send a shutdown signal for reading - you may no longer read from this
        socket.
        """
        # With a separate send channel the main socket only receives, so it
        # can simply be closed instead of half-shut-down.
        if self._sock_send is not None:
            self.sock.close()
        else:
            return self.shutdown(socket.SHUT_RD)
    def shutdown_wr(self):
        """
        Send a shutdown signal for writing - you may no longer write to this
        socket.
        """
        # With a separate send channel, close it rather than half-shut-down
        # the main socket.
        if self._sock_send is not None:
            self._sock_send.close()
        else:
            return self.shutdown(socket.SHUT_WR)
    def fileno(self):
        """
        Return the file descriptor associated with this socket
        """
        # Ambiguous in demultiplexed mode: there are two descriptors.
        if self._sock_send is not None:
            raise UserWarning("Calling fileno when there are in fact two filenos")
        return self.sock.fileno()
    def _print_verbose(self, s):
        # Single choke point for all console logging output.
        assert isinstance(s, str), "s should be str"
        sys.stdout.write(s + '\n')
    def _print_header(self, header):
        # Emit a section header only when verbose header logging is on.
        if self.verbose and self.echo_headers:
            self._print_verbose(header)
    def _print_recv_header(self, fmt, timeout, *args):
        # Like _print_header, but interpolates timeout information into fmt
        # via its {timeout_text} placeholder.
        if self.verbose and self.echo_headers:
            if timeout == 'default':
                timeout = self._timeout
            if timeout is not None:
                timeout_text = ' or until timeout ({0})'.format(timeout)
            else:
                timeout_text = ''
            self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat._connect
|
python
|
def _connect(self, target, listen, udp, ipv6, retry):
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
|
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L363-L400
|
[
"def _print_verbose(self, s):\n assert isinstance(s, str), \"s should be str\"\n sys.stdout.write(s + '\\n')\n",
"def _log_recv(self, data, yielding):\n if yielding == self.log_yield:\n if self.verbose and self.echo_recving:\n self._log_something(data, self.echo_recv_prefix)\n if self.log_recv:\n self.log_recv.write(data)\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.close
|
python
|
def close(self):
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
|
Close the socket.
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L402-L408
| null |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.shutdown
|
python
|
def shutdown(self, how=socket.SHUT_RDWR):
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
|
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L424-L441
| null |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.shutdown_rd
|
python
|
def shutdown_rd(self):
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
|
Send a shutdown signal for reading - you may no longer read from this
socket.
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L443-L451
|
[
"def shutdown(self, how=socket.SHUT_RDWR):\n \"\"\"\n Send a shutdown signal for both reading and writing, or whatever\n socket.SHUT_* constant you like.\n\n Shutdown differs from closing in that it explicitly changes the state of\n the socket resource to closed, whereas closing will only decrement the\n number of peers on this end of the socket, since sockets can be a\n resource shared by multiple peers on a single OS. When the number of\n peers reaches zero, the socket is closed, but not deallocated, so you\n still need to call close. (except that this is python and close is\n automatically called on the deletion of the socket)\n\n http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close\n \"\"\"\n if self._sock_send is not None:\n self._sock_send.shutdown(how)\n return self.sock.shutdown(how)\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.shutdown_wr
|
python
|
def shutdown_wr(self):
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
|
Send a shutdown signal for writing - you may no longer write to this
socket.
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L453-L461
|
[
"def shutdown(self, how=socket.SHUT_RDWR):\n \"\"\"\n Send a shutdown signal for both reading and writing, or whatever\n socket.SHUT_* constant you like.\n\n Shutdown differs from closing in that it explicitly changes the state of\n the socket resource to closed, whereas closing will only decrement the\n number of peers on this end of the socket, since sockets can be a\n resource shared by multiple peers on a single OS. When the number of\n peers reaches zero, the socket is closed, but not deallocated, so you\n still need to call close. (except that this is python and close is\n automatically called on the deletion of the socket)\n\n http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close\n \"\"\"\n if self._sock_send is not None:\n self._sock_send.shutdown(how)\n return self.sock.shutdown(how)\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
    # Stream used for sending: falls back to the recv socket when no
    # dedicated send stream was supplied at construction time.
    if self._sock_send is None:
        return self.sock
    else:
        return self._sock_send

@sock_send.setter
def sock_send(self, val):
    # Replace (or install) the dedicated send stream.
    self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
    """
    Takes the basic version of the user args and extract as much data as
    possible from target. Returns a tuple that is its arguments but
    sanitized.
    """
    if isinstance(target, str):
        if target.startswith('nc '):
            # netcat-style command line, e.g. "nc -u -l -p 1234" or "nc host 80"
            out_host = None
            out_port = None
            try:
                opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
                                             [])
            except getopt.GetoptError as exc:
                raise ValueError(exc)
            for opt, arg in opts:
                if opt == '-u':
                    udp = True
                elif opt == '-4':
                    ipv6 = False
                elif opt == '-6':
                    ipv6 = True
                elif opt == '-l':
                    listen = True
                elif opt == '-p':
                    out_port = int(arg)
                else:
                    assert False, "unhandled option"
            # positional args: [host] or [host, port]; a lone number while
            # listening is treated as a port
            if not pieces:
                pass
            elif len(pieces) == 1:
                if listen and pieces[0].isdigit():
                    out_port = int(pieces[0])
                else:
                    out_host = pieces[0]
            elif len(pieces) == 2 and pieces[1].isdigit():
                out_host = pieces[0]
                out_port = int(pieces[1])
            else:
                raise ValueError("Bad cmdline: %s" % target)
            if out_host is None:
                if listen:
                    # no bind address given: listen on the wildcard address
                    out_host = '::' if ipv6 else '0.0.0.0'
                else:
                    raise ValueError("Missing address: %s" % target)
            if out_port is None:
                raise ValueError("Missing port: %s" % target)
            if _is_ipv6_addr(out_host):
                ipv6 = True
            return (out_host, out_port), listen, udp, ipv6
        elif PROTOCAL_RE.match(target) is not None:
            # URL form; the scheme may force udp/ipv6 and supply a default
            # port via the module-level KNOWN_SCHEMES table
            parsed = urlparse(target)
            port = None
            try:
                scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
            except KeyError:
                raise ValueError("Unknown scheme: %s" % parsed.scheme)
            if scheme_udp is not None:
                udp = scheme_udp
            if scheme_ipv6 is not None:
                ipv6 = scheme_ipv6
            if scheme_port is not None:
                port = scheme_port
            if parsed.netloc.startswith('['):
                # bracketed ipv6 literal, e.g. [::1]:80
                addr, extra = parsed.netloc[1:].split(']', 1)
                if extra.startswith(':'):
                    port = int(extra[1:])
            else:
                if ':' in parsed.netloc:
                    addr, port = parsed.netloc.split(':', 1)
                    port = int(port)
                else:
                    addr = parsed.netloc
            if addr is None or port is None:
                raise ValueError("Can't parse addr/port from %s" % target)
            if _is_ipv6_addr(addr):
                ipv6 = True
            return (addr, port), listen, udp, ipv6
        else:
            # bare "host", "host:port" or "[ipv6addr]:port" string
            if target.startswith('['):
                addr, extra = target[1:].split(']', 1)
                if extra.startswith(':'):
                    port = int(extra[1:])
                else:
                    port = None
            else:
                if ':' in target:
                    addr, port = target.split(':', 1)
                    port = int(port)
                else:
                    addr = target
                    port = None
            if port is None:
                raise ValueError("No port given: %s" % target)
            if _is_ipv6_addr(addr):
                ipv6 = True
            return (addr, port), listen, udp, ipv6
    elif isinstance(target, (int, long)):
        # a bare port number only makes sense for listening
        if listen:
            out_port = target
        else:
            raise ValueError("Can't deal with number as connection address")
        return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
    elif isinstance(target, tuple):
        # assume an (addr, port)-style sockaddr tuple; just sniff for ipv6
        if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
            ipv6 = True
        return target, listen, udp, ipv6
    else:
        raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
    """
    Takes target/listen/udp/ipv6 and sets self.sock and self.peer
    """
    ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
    fam = socket.AF_INET6 if ipv6 else socket.AF_INET
    self.sock = socket.socket(fam, ty)
    if listen:
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(target)
        if not udp:
            # TCP: accept exactly one client, then replace the listening
            # socket with the accepted connection
            self.sock.listen(1)
            conn, addr = self.sock.accept()
            self.sock.close()
            self.sock = conn
            self.peer = addr
        else:
            # UDP: wait for the first datagram to learn the peer address,
            # then connect() so plain send/recv work; the first payload is
            # stashed in self.buf so it isn't lost
            self.buf, self.peer = self.sock.recvfrom(1024)
            self.sock.connect(self.peer)
            self._log_recv(self.buf, False)
        if self.verbose:
            self._print_verbose('Connection from %s accepted' % str(self.peer))
    else:
        while True:
            try:
                self.sock.connect(target)
            except (socket.gaierror, socket.herror) as exc:
                # name-resolution failures are never retried
                raise NetcatError('Could not connect to %r: %r' \
                        % (target, exc))
            except socket.error as exc:
                if retry:
                    # e.g. connection refused while the server boots; back
                    # off briefly and try again
                    time.sleep(0.2)
                else:
                    raise NetcatError('Could not connect to %r: %r' \
                            % (target, exc))
            else:
                break
        self.peer = target
def close(self):
    """
    Close the socket.
    """
    # close the dedicated send stream first, if one exists, then the
    # primary socket; the primary socket's return value is propagated
    send_stream = self._sock_send
    if send_stream is not None:
        send_stream.close()
    return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
    # public, file-style spelling of the closed flag
    return self._closed

@property
def _closed(self):
    # file objects expose `closed`, python 3 sockets expose `_closed`;
    # probe for either on the send stream
    if hasattr(self.sock_send, 'closed'):
        return self.sock_send.closed
    elif hasattr(self.sock_send, '_closed'):
        return self.sock_send._closed
    else:
        return False  # ???  (unknown stream type: optimistically report open)
def shutdown(self, how=socket.SHUT_RDWR):
    """
    Send a shutdown signal for both reading and writing, or whatever
    socket.SHUT_* constant you like.

    Shutdown differs from closing in that it explicitly changes the state of
    the socket resource to closed, whereas closing will only decrement the
    number of peers on this end of the socket, since sockets can be a
    resource shared by multiple peers on a single OS. When the number of
    peers reaches zero, the socket is closed, but not deallocated, so you
    still need to call close. (except that this is python and close is
    automatically called on the deletion of the socket)

    http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
    """
    # shut down the dedicated send stream too, if one exists
    if self._sock_send is not None:
        self._sock_send.shutdown(how)
    return self.sock.shutdown(how)
def shutdown_rd(self):
    """
    Send a shutdown signal for reading - you may no longer read from this
    socket.
    """
    # with a dedicated send stream, self.sock exists purely for reading,
    # so closing it is the read-side shutdown; otherwise shut down the
    # read half of the shared socket
    if self._sock_send is None:
        return self.shutdown(socket.SHUT_RD)
    self.sock.close()
def fileno(self):
    """
    Return the file descriptor associated with this socket
    """
    if self._sock_send is None:
        return self.sock.fileno()
    # a separate send stream means there are two descriptors here;
    # refuse to arbitrarily pick one
    raise UserWarning("Calling fileno when there are in fact two filenos")
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
    # Console-echo helper shared by send and recv logging; honours the
    # echo_perline/echo_hex display knobs.
    if self.echo_perline:
        if self.echo_hex:
            self._print_hex_lines(data, prefix)
        else:
            self._print_lines(data, prefix)
    else:
        if self.echo_hex:
            if hasattr(data, 'hex'):
                # bytes on python 3
                self._print_verbose(prefix + data.hex())
            else:
                # str on python 2
                self._print_verbose(prefix + data.encode('hex'))
        else:
            self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
    """
    Set the default timeout in seconds to use for subsequent socket
    operations
    """
    # remember it for the recv bookkeeping, then push it to the socket
    self._timeout = timeout
    self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
    """
    Receive until predicate returns a positive integer.
    The returned number is the size to return.
    """
    if timeout == 'default':
        timeout = self._timeout
    self.timed_out = False
    start = time.time()
    try:
        while True:
            # check the buffer first: a previous read may already satisfy us
            cut_at = predicate(self.buf)
            if cut_at > 0:
                break
            if timeout is not None:
                time_elapsed = time.time() - start
                if time_elapsed > timeout:
                    raise socket.timeout
                # shrink the per-read timeout so the overall deadline holds
                self._settimeout(timeout - time_elapsed)
            data = self._recv(4096)
            self._log_recv(data, False)
            self.buf += data
            if not data:
                # EOF: either fail loudly or return whatever is buffered
                if raise_eof:
                    raise NetcatError("Connection dropped!")
                cut_at = len(self.buf)
                break
    except KeyboardInterrupt:
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except socket.timeout:
        self.timed_out = True
        if self._raise_timeout:
            raise NetcatTimeout()
        return b''
    except socket.error as exc:
        raise NetcatError('Socket error: %r' % exc)
    # restore the configured default timeout before handing data back
    self._settimeout(self._timeout)
    # slice off exactly cut_at bytes; the remainder stays buffered
    ret = self.buf[:cut_at]
    self.buf = self.buf[cut_at:]
    self._log_recv(ret, True)
    return ret
def _settimeout(self, timeout):
    """
    Internal method - catches failures when working with non-timeoutable
    streams, like files
    """
    try:
        self.sock.settimeout(timeout)
    except AttributeError:
        # file-like streams have no settimeout(); silently ignore
        pass
def gettimeout(self):
    """
    Retrieve the timeout currently associated with the socket
    """
    # return the cached value rather than self.sock.gettimeout():
    # file-like streams have no such method
    return self._timeout
def flush(self):
    """No-op, provided for file-object API compatibility."""
    # no buffering
    pass
def recv(self, n=4096, timeout='default'):
    """
    Receive at most n bytes (default 4096) from the socket

    Aliases: read, get
    """
    self._print_recv_header(
        '======== Receiving {0}B{timeout_text} ========', timeout, n)

    def _want(buf):
        # take everything buffered, capped at n
        return min(n, len(buf))

    return self._recv_predicate(_want, timeout)
def recv_until(self, s, max_size=None, timeout='default'):
    """
    Receive data from the socket until the given substring is observed.
    Data in the same datagram as the substring, following the substring,
    will not be returned and will be cached for future receives.

    Aliases: read_until, readuntil, recvuntil
    """
    self._print_recv_header(
        '======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
    cap = 2 ** 62 if max_size is None else max_size

    def _predicate(buf):
        found = buf.find(s)
        if found >= 0:
            # cut just past the delimiter, never beyond the size cap
            return min(found + len(s), cap)
        # delimiter absent: keep reading until the cap is reached
        return cap if len(buf) >= cap else 0

    return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
    """
    Return all data received until the connection closes.

    Aliases: read_all, readall, recvall
    """
    self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
    # a predicate that never fires makes _recv_predicate read until EOF
    return self._recv_predicate(lambda _buf: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
    """
    Receive exactly n bytes

    Aliases: read_exactly, readexactly, recvexactly
    """
    self._print_recv_header(
        '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)

    def _exactly(buf):
        # yield nothing until the full n bytes have accumulated
        return n if len(buf) >= n else 0

    return self._recv_predicate(_exactly, timeout)
def send(self, s):
    """
    Sends all the given data to the socket.

    Aliases: write, put, sendall, send_all
    """
    self._print_header('======== Sending ({0}) ========'.format(len(s)))
    self._log_send(s)
    total = len(s)
    remaining = s
    while remaining:
        # _send may do a partial write; drop what was accepted and retry
        written = self._send(remaining)
        remaining = remaining[written:]
    return total
def interact(self, insock=sys.stdin, outsock=sys.stdout):
    """
    Connects the socket to the terminal for user interaction.
    Alternate input and output files may be specified.

    This method cannot be used with a timeout.

    Aliases: interactive, interaction
    """
    self._print_header('======== Beginning interactive session ========')
    if hasattr(outsock, 'buffer'):
        # text-mode stdio on python 3: write bytes to the underlying buffer
        outsock = outsock.buffer  # pylint: disable=no-member
    self.timed_out = False
    # suppress per-chunk logging for the duration of the session
    save_verbose = self.verbose
    self.verbose = 0
    try:
        # flush anything already buffered before entering the select loop
        if self.buf:
            outsock.write(self.buf)
            outsock.flush()
            self.buf = b''
        while True:
            readable_socks = select(self.sock, insock)
            for readable in readable_socks:
                if readable is insock:
                    # terminal -> socket; an empty read means stdin closed
                    data = os.read(insock.fileno(), 4096)
                    self.send(data)
                    if not data:
                        raise NetcatError
                else:
                    # socket -> terminal; an empty recv means the peer closed
                    data = self.recv(timeout=None)
                    outsock.write(data)
                    outsock.flush()
                    if not data:
                        raise NetcatError
    except KeyboardInterrupt:
        self.verbose = save_verbose
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except (socket.error, NetcatError):
        self.verbose = save_verbose
        self._print_header('\n======== Connection dropped! ========')
    finally:
        self.verbose = save_verbose
# Line terminator used by recv_line/send_line; override per-class or
# per-instance to change it.
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
    """
    Receive until the next newline, default "\\n". The newline string can
    be changed by changing ``nc.LINE_ENDING``. The newline will be returned
    as part of the string.

    Aliases: recvline, readline, read_line, readln, recvln
    """
    terminator = self.LINE_ENDING if ending is None else ending
    return self.recv_until(terminator, max_size, timeout)
def send_line(self, line, ending=None):
    """
    Write the string to the wire, followed by a newline. The newline string
    can be changed by changing ``nc.LINE_ENDING``.

    Aliases: sendline, writeline, write_line, writeln, sendln
    """
    terminator = self.LINE_ENDING if ending is None else ending
    return self.send(line + terminator)
# Convenience aliases so the API matches several common naming conventions.
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
# BUGFIX: interact()'s docstring promises an `interaction` alias, but only
# the misspelled `ineraction` existed; add the correct spelling and keep
# the misspelling for backward compatibility.
interaction = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat._recv_predicate
|
python
|
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
|
Receive until predicate returns a positive integer.
The returned number is the size to return.
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L569-L618
|
[
"def _print_header(self, header):\n if self.verbose and self.echo_headers:\n self._print_verbose(header)\n",
"def _log_recv(self, data, yielding):\n if yielding == self.log_yield:\n if self.verbose and self.echo_recving:\n self._log_something(data, self.echo_recv_prefix)\n if self.log_recv:\n self.log_recv.write(data)\n",
"def _recv(self, size):\n if hasattr(self.sock, 'recv'):\n return self.sock.recv(size)\n elif hasattr(self.sock, 'read'):\n return self.sock.read(size) # pylint: disable=no-member\n else:\n raise ValueError(\"I don't know how to read from this stream!\")\n",
"def _settimeout(self, timeout):\n \"\"\"\n Internal method - catches failures when working with non-timeoutable\n streams, like files\n \"\"\"\n try:\n self.sock.settimeout(timeout)\n except AttributeError:\n pass\n",
"return self._recv_predicate(lambda s: min(n, len(s)), timeout)\n",
"def _predicate(buf):\n try:\n return min(buf.index(s) + len(s), max_size)\n except ValueError:\n return 0 if len(buf) < max_size else max_size\n",
"return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)\n",
"return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
    """
    Close the socket.
    """
    # close the dedicated send stream first, if one exists, then the
    # primary socket; the primary socket's return value is propagated
    send_stream = self._sock_send
    if send_stream is not None:
        send_stream.close()
    return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
    """
    Send a shutdown signal for reading - you may no longer read from this
    socket.
    """
    # with a dedicated send stream, self.sock exists purely for reading,
    # so closing it is the read-side shutdown; otherwise shut down the
    # read half of the shared socket
    if self._sock_send is None:
        return self.shutdown(socket.SHUT_RD)
    self.sock.close()
def shutdown_wr(self):
    """
    Send a shutdown signal for writing - you may no longer write to this
    socket.
    """
    # with a dedicated send stream, closing it is the write-side shutdown;
    # otherwise shut down the write half of the shared socket
    if self._sock_send is None:
        return self.shutdown(socket.SHUT_WR)
    self._sock_send.close()
def fileno(self):
    """
    Return the file descriptor associated with this socket
    """
    if self._sock_send is None:
        return self.sock.fileno()
    # a separate send stream means there are two descriptors here;
    # refuse to arbitrarily pick one
    raise UserWarning("Calling fileno when there are in fact two filenos")
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv
|
python
|
def recv(self, n=4096, timeout='default'):
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
|
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L640-L650
|
[
"def _print_recv_header(self, fmt, timeout, *args):\n if self.verbose and self.echo_headers:\n if timeout == 'default':\n timeout = self._timeout\n if timeout is not None:\n timeout_text = ' or until timeout ({0})'.format(timeout)\n else:\n timeout_text = ''\n\n self._print_verbose(fmt.format(*args, timeout_text=timeout_text))\n",
"def _recv_predicate(self, predicate, timeout='default', raise_eof=True):\n \"\"\"\n Receive until predicate returns a positive integer.\n The returned number is the size to return.\n \"\"\"\n\n if timeout == 'default':\n timeout = self._timeout\n\n self.timed_out = False\n\n start = time.time()\n try:\n while True:\n cut_at = predicate(self.buf)\n if cut_at > 0:\n break\n if timeout is not None:\n time_elapsed = time.time() - start\n if time_elapsed > timeout:\n raise socket.timeout\n self._settimeout(timeout - time_elapsed)\n\n data = self._recv(4096)\n self._log_recv(data, False)\n self.buf += data\n\n if not data:\n if raise_eof:\n raise NetcatError(\"Connection dropped!\")\n cut_at = len(self.buf)\n break\n\n except KeyboardInterrupt:\n self._print_header('\\n======== Connection interrupted! ========')\n raise\n except socket.timeout:\n self.timed_out = True\n if self._raise_timeout:\n raise NetcatTimeout()\n return b''\n except socket.error as exc:\n raise NetcatError('Socket error: %r' % exc)\n\n self._settimeout(self._timeout)\n\n ret = self.buf[:cut_at]\n self.buf = self.buf[cut_at:]\n self._log_recv(ret, True)\n return ret\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_until
|
python
|
def recv_until(self, s, max_size=None, timeout='default'):
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
|
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L652-L672
|
[
"def _print_recv_header(self, fmt, timeout, *args):\n if self.verbose and self.echo_headers:\n if timeout == 'default':\n timeout = self._timeout\n if timeout is not None:\n timeout_text = ' or until timeout ({0})'.format(timeout)\n else:\n timeout_text = ''\n\n self._print_verbose(fmt.format(*args, timeout_text=timeout_text))\n",
"def _recv_predicate(self, predicate, timeout='default', raise_eof=True):\n \"\"\"\n Receive until predicate returns a positive integer.\n The returned number is the size to return.\n \"\"\"\n\n if timeout == 'default':\n timeout = self._timeout\n\n self.timed_out = False\n\n start = time.time()\n try:\n while True:\n cut_at = predicate(self.buf)\n if cut_at > 0:\n break\n if timeout is not None:\n time_elapsed = time.time() - start\n if time_elapsed > timeout:\n raise socket.timeout\n self._settimeout(timeout - time_elapsed)\n\n data = self._recv(4096)\n self._log_recv(data, False)\n self.buf += data\n\n if not data:\n if raise_eof:\n raise NetcatError(\"Connection dropped!\")\n cut_at = len(self.buf)\n break\n\n except KeyboardInterrupt:\n self._print_header('\\n======== Connection interrupted! ========')\n raise\n except socket.timeout:\n self.timed_out = True\n if self._raise_timeout:\n raise NetcatTimeout()\n return b''\n except socket.error as exc:\n raise NetcatError('Socket error: %r' % exc)\n\n self._settimeout(self._timeout)\n\n ret = self.buf[:cut_at]\n self.buf = self.buf[cut_at:]\n self._log_recv(ret, True)\n return ret\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
             connect=None,
             sock=None,
             listen=None,
             server=None,
             sock_send=None,
             udp=False,
             ipv6=False,
             verbose=0,
             log_send=None,
             log_recv=None,
             raise_timeout=False,
             retry=False,
             log_yield=False):
    # Internal receive buffer: bytes pulled off the socket but not yet
    # yielded to the caller (also pre-seeded by UDP listen, see _connect).
    self.buf = b''

    self.verbose = verbose
    self.log_send = log_send
    self.log_recv = log_recv
    self.log_yield = log_yield
    # Console-echo tuning knobs; these affect only `verbose` logging,
    # never the log_send/log_recv file logs.
    self.echo_headers = True
    self.echo_perline = True
    self.echo_sending = True
    self.echo_recving = True
    self.echo_hex = False
    self.echo_send_prefix = '>> '
    self.echo_recv_prefix = '<< '
    self.sock = None
    self._sock_send = sock_send
    self.peer = None

    # case: Netcat(host, port)
    if isinstance(connect, str) and isinstance(listen, int):
        connect = (connect, listen)

    # case: Netcat(sock)
    if isinstance(connect, socket.socket):
        sock = connect
        connect = None

    # deprecated server kwarg
    if server is not None:
        connect = server

    if sock is None and listen is None and connect is None:
        raise ValueError('Not enough arguments, need at least an '
                         'address or a socket or a listening address!')

    ## we support passing connect as the "name" of the socket
    #if sock is not None and (listen is not None or connect is not None):
    #    raise ValueError("connect or listen arguments may not be "
    #                     "provided if sock is provided")

    if listen is not None and connect is not None:
        raise ValueError("connect and listen arguments cannot be provided at the same time")

    if sock is None:
        if listen is not None:
            target = listen
            listen = True
        else:
            target = connect
            listen = False
        # Normalize the many accepted target formats, then open (or
        # accept) the connection.
        target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
        self._connect(target, listen, udp, ipv6, retry)
    else:
        self.sock = sock
        self.peer = connect

    try:
        self._timeout = self.sock.gettimeout()
    except AttributeError:
        # sock may be a file-like object with no gettimeout()
        self._timeout = None
    self.timed_out = False  # set when an operation times out
    self._raise_timeout = raise_timeout
@property
def sock_send(self):
    """The channel writes go to: the dedicated send socket if one was
    configured, otherwise the main bidirectional socket."""
    return self.sock if self._sock_send is None else self._sock_send
@sock_send.setter
def sock_send(self, val):
    # Replace the dedicated send channel; assigning None reverts sends
    # to the main socket.
    self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
    """
    Takes the basic version of the user args and extract as much data as
    possible from target. Returns a tuple that is its arguments but
    sanitized.

    Accepted forms: an ``nc``-style command line ("nc -u -l -p 8080"),
    a URL ("scheme://host:port"), a "host:port" / "[v6addr]:port"
    string, a bare port number (listen mode only), or an address tuple.
    """
    if isinstance(target, str):
        if target.startswith('nc '):
            # Form 1: netcat command line, e.g. "nc -6 -l -p 1234 host".
            out_host = None
            out_port = None
            try:
                opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
                                             [])
            except getopt.GetoptError as exc:
                raise ValueError(exc)
            for opt, arg in opts:
                if opt == '-u':
                    udp = True
                elif opt == '-4':
                    ipv6 = False
                elif opt == '-6':
                    ipv6 = True
                elif opt == '-l':
                    listen = True
                elif opt == '-p':
                    out_port = int(arg)
                else:
                    assert False, "unhandled option"

            # Positional args: [host] [port], or just a port when listening.
            if not pieces:
                pass
            elif len(pieces) == 1:
                if listen and pieces[0].isdigit():
                    out_port = int(pieces[0])
                else:
                    out_host = pieces[0]
            elif len(pieces) == 2 and pieces[1].isdigit():
                out_host = pieces[0]
                out_port = int(pieces[1])
            else:
                raise ValueError("Bad cmdline: %s" % target)

            if out_host is None:
                if listen:
                    # No bind address given: use the wildcard address.
                    out_host = '::' if ipv6 else '0.0.0.0'
                else:
                    raise ValueError("Missing address: %s" % target)
            if out_port is None:
                raise ValueError("Missing port: %s" % target)
            if _is_ipv6_addr(out_host):
                ipv6 = True
            return (out_host, out_port), listen, udp, ipv6
        elif PROTOCAL_RE.match(target) is not None:
            # Form 2: URL. The scheme may force udp/ipv6 and supply a
            # default port via KNOWN_SCHEMES; an explicit port wins.
            parsed = urlparse(target)
            port = None
            try:
                scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
            except KeyError:
                raise ValueError("Unknown scheme: %s" % parsed.scheme)
            if scheme_udp is not None:
                udp = scheme_udp
            if scheme_ipv6 is not None:
                ipv6 = scheme_ipv6
            if scheme_port is not None:
                port = scheme_port

            if parsed.netloc.startswith('['):
                # Bracketed IPv6 literal, optionally followed by ":port".
                addr, extra = parsed.netloc[1:].split(']', 1)
                if extra.startswith(':'):
                    port = int(extra[1:])
            else:
                if ':' in parsed.netloc:
                    addr, port = parsed.netloc.split(':', 1)
                    port = int(port)
                else:
                    addr = parsed.netloc

            if addr is None or port is None:
                raise ValueError("Can't parse addr/port from %s" % target)
            if _is_ipv6_addr(addr):
                ipv6 = True
            return (addr, port), listen, udp, ipv6
        else:
            # Form 3: plain "host:port" or "[v6addr]:port".
            if target.startswith('['):
                addr, extra = target[1:].split(']', 1)
                if extra.startswith(':'):
                    port = int(extra[1:])
                else:
                    port = None
            else:
                if ':' in target:
                    addr, port = target.split(':', 1)
                    port = int(port)
                else:
                    addr = target
                    port = None

            if port is None:
                raise ValueError("No port given: %s" % target)
            if _is_ipv6_addr(addr):
                ipv6 = True
            return (addr, port), listen, udp, ipv6
    elif isinstance(target, (int, long)):
        # Form 4: bare port number; only meaningful when listening.
        # NOTE(review): 'long' is a Python 2 name — presumably aliased to
        # int at module top for py3; confirm against the module header.
        if listen:
            out_port = target
        else:
            raise ValueError("Can't deal with number as connection address")
        return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
    elif isinstance(target, tuple):
        # Form 5: already an address tuple; just sniff for an IPv6 host.
        if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
            ipv6 = True
        return target, listen, udp, ipv6
    else:
        raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
    """
    Takes target/listen/udp/ipv6 and sets self.sock and self.peer
    """
    ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
    fam = socket.AF_INET6 if ipv6 else socket.AF_INET
    self.sock = socket.socket(fam, ty)

    if listen:
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(target)
        if not udp:
            # TCP: accept exactly one client, then replace the listening
            # socket with the accepted connection.
            self.sock.listen(1)
            conn, addr = self.sock.accept()
            self.sock.close()
            self.sock = conn
            self.peer = addr
        else:
            # UDP: learn the peer from the first datagram, connect the
            # socket to it, and keep that first payload in self.buf.
            self.buf, self.peer = self.sock.recvfrom(1024)
            self.sock.connect(self.peer)
            self._log_recv(self.buf, False)
        if self.verbose:
            self._print_verbose('Connection from %s accepted' % str(self.peer))
    else:
        while True:
            try:
                self.sock.connect(target)
            except (socket.gaierror, socket.herror) as exc:
                # Name-resolution failures are permanent; never retried.
                raise NetcatError('Could not connect to %r: %r' \
                        % (target, exc))
            except socket.error as exc:
                if retry:
                    # Possibly-transient failure: brief back-off, retry.
                    time.sleep(0.2)
                else:
                    raise NetcatError('Could not connect to %r: %r' \
                            % (target, exc))
            else:
                break
        self.peer = target
def close(self):
    """Close this connection's socket, plus the dedicated send channel
    if one exists."""
    send_chan = self._sock_send
    if send_chan is not None:
        send_chan.close()
    return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
    """Public file-style alias for the internal socket-style ``_closed``."""
    return self._closed
@property
def _closed(self):
    """Closed-state of the send channel, probing whichever attribute the
    underlying object exposes (files use ``closed``, py3 sockets
    ``_closed``)."""
    for attr in ('closed', '_closed'):
        if hasattr(self.sock_send, attr):
            return getattr(self.sock_send, attr)
    return False  # ??? (no way to tell for this stream type)
def shutdown(self, how=socket.SHUT_RDWR):
    """
    Send a shutdown signal for both reading and writing, or whatever
    socket.SHUT_* constant you like.

    Shutdown differs from closing in that it explicitly changes the state of
    the socket resource to closed, whereas closing will only decrement the
    number of peers on this end of the socket, since sockets can be a
    resource shared by multiple peers on a single OS. When the number of
    peers reaches zero, the socket is closed, but not deallocated, so you
    still need to call close. (except that this is python and close is
    automatically called on the deletion of the socket)

    http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
    """
    if self._sock_send is not None:
        # Split-channel mode: apply the same shutdown to both channels.
        self._sock_send.shutdown(how)
    return self.sock.shutdown(how)
def shutdown_rd(self):
    """
    Send a shutdown signal for reading - you may no longer read from this
    socket.
    """
    if self._sock_send is not None:
        # Split-channel mode: self.sock is the receive side, so closing it
        # ends reading without touching the send channel.
        # NOTE(review): this closes rather than shuts down — presumably
        # intentional because pipes have no shutdown(); confirm.
        self.sock.close()
    else:
        return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
    """
    Send a shutdown signal for writing - you may no longer write to this
    socket.
    """
    if self._sock_send is not None:
        # Split-channel mode: close the dedicated send channel only
        # (pipes have no shutdown()); the receive side stays open.
        self._sock_send.close()
    else:
        return self.shutdown(socket.SHUT_WR)
def fileno(self):
    """Return the socket's file descriptor.

    Raises UserWarning in split-channel mode, where a single fileno is
    meaningless (there are two underlying streams)."""
    if self._sock_send is None:
        return self.sock.fileno()
    raise UserWarning("Calling fileno when there are in fact two filenos")
def _print_verbose(self, s):
    """Write one line of console logging output to stdout."""
    assert isinstance(s, str), "s should be str"
    sys.stdout.write(s + '\n')
def _print_header(self, header):
    """Echo a section header, unless header echoing is disabled."""
    if not (self.verbose and self.echo_headers):
        return
    self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
    """Echo a receive-operation header, substituting {timeout_text} in
    *fmt* with a description of the effective timeout."""
    if not (self.verbose and self.echo_headers):
        return
    effective = self._timeout if timeout == 'default' else timeout
    if effective is None:
        suffix = ''
    else:
        suffix = ' or until timeout ({0})'.format(effective)
    self._print_verbose(fmt.format(*args, timeout_text=suffix))
def _log_something(self, data, prefix):
    """Echo *data* to the console, honoring the echo_perline/echo_hex
    formatting knobs; each line is prefixed with *prefix*."""
    if self.echo_perline:
        if self.echo_hex:
            self._print_hex_lines(data, prefix)
        else:
            self._print_lines(data, prefix)
    else:
        if self.echo_hex:
            if hasattr(data, 'hex'):
                # py3 bytes have .hex(); the fallback is the py2
                # str.encode('hex') codec
                self._print_verbose(prefix + data.hex())
            else:
                self._print_verbose(prefix + data.encode('hex'))
        else:
            self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
    """Log received data. Honors log_yield: each chunk is logged either
    when read from the socket or when handed to the caller, not both."""
    if yielding != self.log_yield:
        return
    if self.verbose and self.echo_recving:
        self._log_something(data, self.echo_recv_prefix)
    if self.log_recv:
        self.log_recv.write(data)
def _log_send(self, data):
    """Log outgoing data to the console and/or the send log file."""
    if self.verbose and self.echo_sending:
        self._log_something(data, self.echo_send_prefix)
    logfile = self.log_send
    if logfile:
        logfile.write(data)
def _print_lines(self, s, prefix):
    """Echo *s* split on newlines, one prefixed console line per chunk."""
    # NOTE(review): str() on a bytes chunk yields the "b'...'" repr —
    # this matches the original behavior.
    chunks = s.split(b'\n')
    for chunk in chunks:
        self._print_verbose(prefix + str(chunk))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
    """Hexdump *s* in 16-byte rows: spaced hex on the left, printable
    ASCII rendering on the right."""
    for offset in range(0, len(s), 16):
        row = s[offset:offset + 16]
        self._print_verbose('%s%-47s |%-16s|' % (
            prefix, self._to_spaced_hex(row), self._to_printable_str(row)))
def settimeout(self, timeout):
    """Set the default timeout, in seconds, used by subsequent socket
    operations (None disables the timeout)."""
    # Remember the value for deadline bookkeeping, then push it down to
    # the underlying socket.
    self._timeout = timeout
    self._settimeout(timeout)
def _send(self, data):
    """Write *data* to the send channel via whichever of send()/write()
    it supports; returns the number of bytes accepted."""
    for attr in ('send', 'write'):
        if hasattr(self.sock_send, attr):
            return getattr(self.sock_send, attr)(data)  # pylint: disable=no-member
    raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
    """Read up to *size* bytes from the receive channel via whichever of
    recv()/read() it supports."""
    for attr in ('recv', 'read'):
        if hasattr(self.sock, attr):
            return getattr(self.sock, attr)(size)  # pylint: disable=no-member
    raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
    """
    Receive until predicate returns a positive integer.
    The returned number is the size to return.

    :param predicate: callable mapping the current buffer (bytes) to the
        number of bytes to yield; 0 means "keep reading".
    :param timeout: seconds, None for no timeout, or 'default' for the
        configured default timeout.
    :param raise_eof: if True, raise NetcatError when the peer closes
        before the predicate is satisfied; otherwise yield whatever was
        buffered.
    """
    if timeout == 'default':
        timeout = self._timeout

    self.timed_out = False

    start = time.time()
    try:
        while True:
            cut_at = predicate(self.buf)
            if cut_at > 0:
                break
            if timeout is not None:
                # Shrink the per-recv timeout so the loop as a whole
                # honors the single overall deadline.
                time_elapsed = time.time() - start
                if time_elapsed > timeout:
                    raise socket.timeout
                self._settimeout(timeout - time_elapsed)

            data = self._recv(4096)
            self._log_recv(data, False)
            self.buf += data

            if not data:
                # Empty read means the peer closed the connection.
                if raise_eof:
                    raise NetcatError("Connection dropped!")
                cut_at = len(self.buf)
                break

    except KeyboardInterrupt:
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except socket.timeout:
        self.timed_out = True
        if self._raise_timeout:
            raise NetcatTimeout()
        return b''
    except socket.error as exc:
        raise NetcatError('Socket error: %r' % exc)

    # Restore the configured timeout clobbered by the deadline logic.
    self._settimeout(self._timeout)

    # Yield the first cut_at bytes; keep the remainder buffered.
    ret = self.buf[:cut_at]
    self.buf = self.buf[cut_at:]
    self._log_recv(ret, True)
    return ret
def _settimeout(self, timeout):
    """
    Internal method - catches failures when working with non-timeoutable
    streams, like files
    """
    try:
        self.sock.settimeout(timeout)
    except AttributeError:
        # self.sock is a file-like object with no settimeout(): no-op
        pass
def gettimeout(self):
    """Return the timeout (seconds or None) currently configured for
    this connection's operations."""
    return self._timeout
def flush(self):
    """File-API compatibility no-op: writes are never buffered here."""
    # no buffering
    pass
def recv(self, n=4096, timeout='default'):
    """
    Receive at most n bytes (default 4096) from the socket.

    Aliases: read, get
    """
    self._print_recv_header(
        '======== Receiving {0}B{timeout_text} ========', timeout, n)
    def _up_to_n(buffered):
        return min(n, len(buffered))
    return self._recv_predicate(_up_to_n, timeout)
def recv_all(self, timeout='default'):
    """
    Return all data received until the connection closes.

    Aliases: read_all, readall, recvall
    """
    self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
    # Predicate never fires, so the read loop only ends on EOF (which is
    # tolerated via raise_eof=False) or timeout.
    return self._recv_predicate(lambda _buffered: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
    """
    Receive exactly n bytes; any extra data read stays buffered.

    Aliases: read_exactly, readexactly, recvexactly
    """
    self._print_recv_header(
        '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
    def _have_n(buffered):
        return n if len(buffered) >= n else 0
    return self._recv_predicate(_have_n, timeout)
def send(self, s):
    """
    Send all of *s* to the socket, looping over partial writes.
    Returns the total number of bytes sent.

    Aliases: write, put, sendall, send_all
    """
    self._print_header('======== Sending ({0}) ========'.format(len(s)))
    self._log_send(s)
    total = len(s)
    remaining = s
    while remaining:
        accepted = self._send(remaining)
        remaining = remaining[accepted:]
    return total
def interact(self, insock=sys.stdin, outsock=sys.stdout):
    """
    Connects the socket to the terminal for user interaction.
    Alternate input and output files may be specified.

    This method cannot be used with a timeout.

    Aliases: interactive, interaction
    """
    self._print_header('======== Beginning interactive session ========')
    if hasattr(outsock, 'buffer'):
        # Text-mode stream (e.g. sys.stdout): write bytes to its
        # underlying binary buffer instead.
        outsock = outsock.buffer # pylint: disable=no-member
    self.timed_out = False

    # Disable echo logging for the duration — the user sees the raw
    # traffic directly; restored in the finally block.
    save_verbose = self.verbose
    self.verbose = 0
    try:
        # Drain anything already buffered before going interactive.
        if self.buf:
            outsock.write(self.buf)
            outsock.flush()
            self.buf = b''
        while True:
            readable_socks = select(self.sock, insock)
            for readable in readable_socks:
                if readable is insock:
                    data = os.read(insock.fileno(), 4096)
                    self.send(data)
                    if not data:
                        # EOF on local input: end the session.
                        raise NetcatError
                else:
                    data = self.recv(timeout=None)
                    outsock.write(data)
                    outsock.flush()
                    if not data:
                        # Peer closed the connection.
                        raise NetcatError
    except KeyboardInterrupt:
        self.verbose = save_verbose
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except (socket.error, NetcatError):
        self.verbose = save_verbose
        self._print_header('\n======== Connection dropped! ========')
    finally:
        self.verbose = save_verbose
# Terminator used by recv_line/send_line when no explicit ending is given.
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
    """
    Receive up to and including the next newline. The terminator
    defaults to ``nc.LINE_ENDING`` (b'\\n') and may be overridden per
    call via *ending*; it is returned as part of the result.

    Aliases: recvline, readline, read_line, readln, recvln
    """
    terminator = self.LINE_ENDING if ending is None else ending
    return self.recv_until(terminator, max_size, timeout)
def send_line(self, line, ending=None):
    """
    Send *line* followed by a newline. The terminator defaults to
    ``nc.LINE_ENDING`` (b'\\n') and may be overridden per call via
    *ending*.

    Aliases: sendline, writeline, write_line, writeln, sendln
    """
    terminator = self.LINE_ENDING if ending is None else ending
    return self.send(line + terminator)
# Convenience aliases mirroring the naming conventions of files, raw
# sockets, and pwntools-style tubes.
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
interaction = interact  # fix: interact()'s docstring advertises this alias
ineraction = interact   # historical misspelling, kept for backward compat
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_all
|
python
|
def recv_all(self, timeout='default'):
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
|
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L674-L683
|
[
"def _print_recv_header(self, fmt, timeout, *args):\n if self.verbose and self.echo_headers:\n if timeout == 'default':\n timeout = self._timeout\n if timeout is not None:\n timeout_text = ' or until timeout ({0})'.format(timeout)\n else:\n timeout_text = ''\n\n self._print_verbose(fmt.format(*args, timeout_text=timeout_text))\n",
"def _recv_predicate(self, predicate, timeout='default', raise_eof=True):\n \"\"\"\n Receive until predicate returns a positive integer.\n The returned number is the size to return.\n \"\"\"\n\n if timeout == 'default':\n timeout = self._timeout\n\n self.timed_out = False\n\n start = time.time()\n try:\n while True:\n cut_at = predicate(self.buf)\n if cut_at > 0:\n break\n if timeout is not None:\n time_elapsed = time.time() - start\n if time_elapsed > timeout:\n raise socket.timeout\n self._settimeout(timeout - time_elapsed)\n\n data = self._recv(4096)\n self._log_recv(data, False)\n self.buf += data\n\n if not data:\n if raise_eof:\n raise NetcatError(\"Connection dropped!\")\n cut_at = len(self.buf)\n break\n\n except KeyboardInterrupt:\n self._print_header('\\n======== Connection interrupted! ========')\n raise\n except socket.timeout:\n self.timed_out = True\n if self._raise_timeout:\n raise NetcatTimeout()\n return b''\n except socket.error as exc:\n raise NetcatError('Socket error: %r' % exc)\n\n self._settimeout(self._timeout)\n\n ret = self.buf[:cut_at]\n self.buf = self.buf[cut_at:]\n self._log_recv(ret, True)\n return ret\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
    """Do nothing: this object performs no write buffering of its own."""
    # Writes go straight to the socket, so there is nothing to flush.
def recv(self, n=4096, timeout='default'):
    """
    Receive at most n bytes (default 4096) from the socket
    Aliases: read, get
    """
    self._print_recv_header(
        '======== Receiving {0}B{timeout_text} ========', timeout, n)
    # Yield as soon as any data is buffered, capped at n bytes.
    def _take(buffered):
        return min(n, len(buffered))
    return self._recv_predicate(_take, timeout)
def recv_until(self, s, max_size=None, timeout='default'):
    """
    Receive data from the socket until the given substring is observed.
    Bytes arriving in the same datagram after the substring are not
    returned; they stay buffered for future receives.
    Aliases: read_until, readuntil, recvuntil
    """
    self._print_recv_header(
        '======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
    limit = 2 ** 62 if max_size is None else max_size
    # Cut right after the delimiter, or at the size cap, whichever first.
    def _cut(buffered):
        idx = buffered.find(s)
        if idx >= 0:
            return min(idx + len(s), limit)
        return limit if len(buffered) >= limit else 0
    return self._recv_predicate(_cut, timeout)
def recv_exactly(self, n, timeout='default'):
    """
    Receive exactly n bytes, no more and no fewer.
    Aliases: read_exactly, readexactly, recvexactly
    """
    self._print_recv_header(
        '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
    # Only cut once the buffer holds at least n bytes.
    def _exact(buffered):
        return n if len(buffered) >= n else 0
    return self._recv_predicate(_exact, timeout)
def send(self, s):
    """
    Send all of the given data to the socket, looping over partial
    writes until everything has gone out. Returns the total byte count.
    Aliases: write, put, sendall, send_all
    """
    self._print_header('======== Sending ({0}) ========'.format(len(s)))
    self._log_send(s)
    total = len(s)
    remaining = s
    while remaining:
        # _send may accept only part of the data; drop what was taken.
        accepted = self._send(remaining)
        remaining = remaining[accepted:]
    return total
def interact(self, insock=sys.stdin, outsock=sys.stdout):
    """
    Connects the socket to the terminal for user interaction.
    Alternate input and output files may be specified.
    This method cannot be used with a timeout.
    Aliases: interactive, interaction

    :param insock:  file-like object to read user input from
    :param outsock: file-like object to echo received data to
    """
    self._print_header('======== Beginning interactive session ========')
    # Text-mode streams (e.g. sys.stdout) expose the raw binary layer
    # as .buffer; write bytes there to avoid text-encoding issues.
    if hasattr(outsock, 'buffer'):
        outsock = outsock.buffer  # pylint: disable=no-member
    self.timed_out = False
    # Silence per-operation logging for the duration of the session;
    # restored in the except/finally handlers below.
    save_verbose = self.verbose
    self.verbose = 0
    try:
        # Flush anything already buffered from earlier receives first.
        if self.buf:
            outsock.write(self.buf)
            outsock.flush()
            self.buf = b''
        while True:
            # Block until either side has data to relay.
            readable_socks = select(self.sock, insock)
            for readable in readable_socks:
                if readable is insock:
                    data = os.read(insock.fileno(), 4096)
                    self.send(data)
                    if not data:
                        # EOF on user input ends the session.
                        raise NetcatError
                else:
                    data = self.recv(timeout=None)
                    outsock.write(data)
                    outsock.flush()
                    if not data:
                        # EOF from the peer ends the session.
                        raise NetcatError
    except KeyboardInterrupt:
        self.verbose = save_verbose
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except (socket.error, NetcatError):
        self.verbose = save_verbose
        self._print_header('\n======== Connection dropped! ========')
    finally:
        self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
    """
    Receive up to and including the next line ending, default "\\n".
    The terminator can be changed globally via ``nc.LINE_ENDING`` or
    per-call via *ending*; it is included in the returned data.
    Aliases: recvline, readline, read_line, readln, recvln
    """
    terminator = self.LINE_ENDING if ending is None else ending
    return self.recv_until(terminator, max_size, timeout)
def send_line(self, line, ending=None):
    """
    Send *line* followed by a line terminator, default "\\n". The
    terminator can be changed globally via ``nc.LINE_ENDING`` or
    per-call via *ending*.
    Aliases: sendline, writeline, write_line, writeln, sendln
    """
    terminator = self.LINE_ENDING if ending is None else ending
    return self.send(line + terminator)
# Convenience aliases so all the common netcat-style verbs resolve to
# the canonical implementations above.
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
# interact's docstring advertises "interaction", but only the
# misspelled "ineraction" existed; provide the documented name and
# keep the misspelling for backward compatibility.
interaction = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_exactly
|
python
|
def recv_exactly(self, n, timeout='default'):
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
|
Receive exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L685-L695
|
[
"def _print_recv_header(self, fmt, timeout, *args):\n if self.verbose and self.echo_headers:\n if timeout == 'default':\n timeout = self._timeout\n if timeout is not None:\n timeout_text = ' or until timeout ({0})'.format(timeout)\n else:\n timeout_text = ''\n\n self._print_verbose(fmt.format(*args, timeout_text=timeout_text))\n",
"def _recv_predicate(self, predicate, timeout='default', raise_eof=True):\n \"\"\"\n Receive until predicate returns a positive integer.\n The returned number is the size to return.\n \"\"\"\n\n if timeout == 'default':\n timeout = self._timeout\n\n self.timed_out = False\n\n start = time.time()\n try:\n while True:\n cut_at = predicate(self.buf)\n if cut_at > 0:\n break\n if timeout is not None:\n time_elapsed = time.time() - start\n if time_elapsed > timeout:\n raise socket.timeout\n self._settimeout(timeout - time_elapsed)\n\n data = self._recv(4096)\n self._log_recv(data, False)\n self.buf += data\n\n if not data:\n if raise_eof:\n raise NetcatError(\"Connection dropped!\")\n cut_at = len(self.buf)\n break\n\n except KeyboardInterrupt:\n self._print_header('\\n======== Connection interrupted! ========')\n raise\n except socket.timeout:\n self.timed_out = True\n if self._raise_timeout:\n raise NetcatTimeout()\n return b''\n except socket.error as exc:\n raise NetcatError('Socket error: %r' % exc)\n\n self._settimeout(self._timeout)\n\n ret = self.buf[:cut_at]\n self.buf = self.buf[cut_at:]\n self._log_recv(ret, True)\n return ret\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
    """
    Receive everything until the peer closes the connection.
    Aliases: read_all, readall, recvall
    """
    self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
    # Never cut early: only EOF (raise_eof=False) terminates the read.
    def _never(buffered):
        return 0
    return self._recv_predicate(_never, timeout, raise_eof=False)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.send
|
python
|
def send(self, s):
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
|
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L697-L710
|
[
"def _print_header(self, header):\n if self.verbose and self.echo_headers:\n self._print_verbose(header)\n",
"def _log_send(self, data):\n if self.verbose and self.echo_sending:\n self._log_something(data, self.echo_send_prefix)\n if self.log_send:\n self.log_send.write(data)\n",
"def _send(self, data):\n if hasattr(self.sock_send, 'send'):\n return self.sock_send.send(data)\n elif hasattr(self.sock_send, 'write'):\n return self.sock_send.write(data) # pylint: disable=no-member\n else:\n raise ValueError(\"I don't know how to write to this stream!\")\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
    def __init__(self,
                 connect=None,
                 sock=None,
                 listen=None,
                 server=None,
                 sock_send=None,
                 udp=False,
                 ipv6=False,
                 verbose=0,
                 log_send=None,
                 log_recv=None,
                 raise_timeout=False,
                 retry=False,
                 log_yield=False):
        # See the class docstring for the meaning of every parameter.
        # `buf` holds bytes already read from the wire but not yet yielded.
        self.buf = b''
        self.verbose = verbose
        self.log_send = log_send
        self.log_recv = log_recv
        self.log_yield = log_yield
        # Console-echo tuning knobs; these affect verbose logging only,
        # never log_send/log_recv (which record pristine traffic).
        self.echo_headers = True
        self.echo_perline = True
        self.echo_sending = True
        self.echo_recving = True
        self.echo_hex = False
        self.echo_send_prefix = '>> '
        self.echo_recv_prefix = '<< '
        self.sock = None
        self._sock_send = sock_send
        self.peer = None
        # Argument normalization below is order-sensitive: positional-style
        # calls are rewritten before the mutually-exclusive checks run.
        # case: Netcat(host, port)
        if isinstance(connect, str) and isinstance(listen, int):
            connect = (connect, listen)
        # case: Netcat(sock)
        if isinstance(connect, socket.socket):
            sock = connect
            connect = None
        # deprecated server kwarg
        if server is not None:
            connect = server
        if sock is None and listen is None and connect is None:
            raise ValueError('Not enough arguments, need at least an '
                             'address or a socket or a listening address!')
        ## we support passing connect as the "name" of the socket
        #if sock is not None and (listen is not None or connect is not None):
        #    raise ValueError("connect or listen arguments may not be "
        #            "provided if sock is provided")
        if listen is not None and connect is not None:
            raise ValueError("connect and listen arguments cannot be provided at the same time")
        if sock is None:
            # Collapse connect/listen into (target, listen-flag) form, then
            # let _parse_target extract udp/ipv6/port info from the target.
            if listen is not None:
                target = listen
                listen = True
            else:
                target = connect
                listen = False
            target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
            self._connect(target, listen, udp, ipv6, retry)
        else:
            # Wrap an existing socket/pipe; `connect` (if any) names the peer.
            self.sock = sock
            self.peer = connect
        try:
            self._timeout = self.sock.gettimeout()
        except AttributeError:
            # Non-socket streams (files, pipes) have no gettimeout.
            self._timeout = None
        self.timed_out = False  # set when an operation times out
        self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
    @staticmethod
    def _parse_target(target, listen, udp, ipv6):
        """
        Takes the basic version of the user args and extract as much data as
        possible from target. Returns a tuple that is its arguments but
        sanitized: ``((host, port), listen, udp, ipv6)``.

        Accepted target forms: an ``'nc ...'`` command line, a URL with a
        known scheme, a ``'host:port'`` / ``'[v6addr]:port'`` string, a bare
        port number (listen mode only), or an address tuple.
        """
        if isinstance(target, str):
            # Form 1: a netcat-style command line, e.g. "nc -u -l -p 1234".
            if target.startswith('nc '):
                out_host = None
                out_port = None
                try:
                    opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
                                                 [])
                except getopt.GetoptError as exc:
                    raise ValueError(exc)
                for opt, arg in opts:
                    if opt == '-u':
                        udp = True
                    elif opt == '-4':
                        ipv6 = False
                    elif opt == '-6':
                        ipv6 = True
                    elif opt == '-l':
                        listen = True
                    elif opt == '-p':
                        out_port = int(arg)
                    else:
                        assert False, "unhandled option"
                # Positional args: either "port" (listen), "host", or "host port".
                if not pieces:
                    pass
                elif len(pieces) == 1:
                    if listen and pieces[0].isdigit():
                        out_port = int(pieces[0])
                    else:
                        out_host = pieces[0]
                elif len(pieces) == 2 and pieces[1].isdigit():
                    out_host = pieces[0]
                    out_port = int(pieces[1])
                else:
                    raise ValueError("Bad cmdline: %s" % target)
                if out_host is None:
                    if listen:
                        # No bind address given: listen on the wildcard address.
                        out_host = '::' if ipv6 else '0.0.0.0'
                    else:
                        raise ValueError("Missing address: %s" % target)
                if out_port is None:
                    raise ValueError("Missing port: %s" % target)
                if _is_ipv6_addr(out_host):
                    ipv6 = True
                return (out_host, out_port), listen, udp, ipv6
            # Form 2: a URL with a scheme known to KNOWN_SCHEMES, which may
            # force udp/ipv6 and supply a default port.
            elif PROTOCAL_RE.match(target) is not None:
                parsed = urlparse(target)
                port = None
                try:
                    scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
                except KeyError:
                    raise ValueError("Unknown scheme: %s" % parsed.scheme)
                if scheme_udp is not None:
                    udp = scheme_udp
                if scheme_ipv6 is not None:
                    ipv6 = scheme_ipv6
                if scheme_port is not None:
                    port = scheme_port
                # Bracketed netloc means an IPv6 literal, e.g. "[::1]:80".
                if parsed.netloc.startswith('['):
                    addr, extra = parsed.netloc[1:].split(']', 1)
                    if extra.startswith(':'):
                        port = int(extra[1:])
                else:
                    if ':' in parsed.netloc:
                        addr, port = parsed.netloc.split(':', 1)
                        port = int(port)
                    else:
                        addr = parsed.netloc
                if addr is None or port is None:
                    raise ValueError("Can't parse addr/port from %s" % target)
                if _is_ipv6_addr(addr):
                    ipv6 = True
                return (addr, port), listen, udp, ipv6
            # Form 3: a plain "host:port" or "[v6addr]:port" string.
            else:
                if target.startswith('['):
                    addr, extra = target[1:].split(']', 1)
                    if extra.startswith(':'):
                        port = int(extra[1:])
                    else:
                        port = None
                else:
                    if ':' in target:
                        addr, port = target.split(':', 1)
                        port = int(port)
                    else:
                        addr = target
                        port = None
                if port is None:
                    raise ValueError("No port given: %s" % target)
                if _is_ipv6_addr(addr):
                    ipv6 = True
                return (addr, port), listen, udp, ipv6
        # Form 4: a bare port number — only meaningful when listening.
        # NOTE(review): `long` is a Python-2 name; presumably aliased to int
        # elsewhere in this module for py3 — confirm.
        elif isinstance(target, (int, long)):
            if listen:
                out_port = target
            else:
                raise ValueError("Can't deal with number as connection address")
            return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
        # Form 5: an address tuple, passed through (ipv6 sniffed from host).
        elif isinstance(target, tuple):
            if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
                ipv6 = True
            return target, listen, udp, ipv6
        else:
            raise ValueError("Can't parse target: %r" % target)
    def _connect(self, target, listen, udp, ipv6, retry):
        """
        Takes target/listen/udp/ipv6 and sets self.sock and self.peer.

        In listen mode this blocks until one peer arrives (TCP accept, or the
        first UDP datagram); in connect mode it dials the target, retrying on
        socket errors when ``retry`` is true.
        """
        ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
        fam = socket.AF_INET6 if ipv6 else socket.AF_INET
        self.sock = socket.socket(fam, ty)
        if listen:
            self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.sock.bind(target)
            if not udp:
                # One-off server: accept a single client, then discard the
                # listening socket and talk over the accepted connection.
                self.sock.listen(1)
                conn, addr = self.sock.accept()
                self.sock.close()
                self.sock = conn
                self.peer = addr
            else:
                # UDP has no accept; the first datagram identifies the peer.
                # Its payload is buffered and logged, then the socket is
                # connected so plain send/recv address that peer.
                self.buf, self.peer = self.sock.recvfrom(1024)
                self.sock.connect(self.peer)
                self._log_recv(self.buf, False)
            if self.verbose:
                self._print_verbose('Connection from %s accepted' % str(self.peer))
        else:
            while True:
                try:
                    self.sock.connect(target)
                except (socket.gaierror, socket.herror) as exc:
                    # Name-resolution failures are never retried.
                    raise NetcatError('Could not connect to %r: %r' \
                            % (target, exc))
                except socket.error as exc:
                    if retry:
                        time.sleep(0.2)
                    else:
                        raise NetcatError('Could not connect to %r: %r' \
                                % (target, exc))
                else:
                    break
            self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
    def shutdown(self, how=socket.SHUT_RDWR):
        """
        Send a shutdown signal for both reading and writing, or whatever
        socket.SHUT_* constant you like.

        Shutdown differs from closing in that it explicitly changes the state of
        the socket resource to closed, whereas closing will only decrement the
        number of peers on this end of the socket, since sockets can be a
        resource shared by multiple peers on a single OS. When the number of
        peers reaches zero, the socket is closed, but not deallocated, so you
        still need to call close. (except that this is python and close is
        automatically called on the deletion of the socket)

        http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
        """
        # With a split send channel, shut down both directions' sockets.
        if self._sock_send is not None:
            self._sock_send.shutdown(how)
        return self.sock.shutdown(how)
    def shutdown_rd(self):
        """
        Send a shutdown signal for reading - you may no longer read from this
        socket.
        """
        # NOTE(review): when a separate send channel exists, the receive
        # stream is closed outright rather than shut down — presumably because
        # a demuxed recv stream may be a pipe/file with no shutdown(); confirm.
        if self._sock_send is not None:
            self.sock.close()
        else:
            return self.shutdown(socket.SHUT_RD)
    def shutdown_wr(self):
        """
        Send a shutdown signal for writing - you may no longer write to this
        socket.
        """
        # A separate send channel may be a pipe/file; closing it is the
        # write-side equivalent of a shutdown.
        if self._sock_send is not None:
            self._sock_send.close()
        else:
            return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
    def _print_header(self, header):
        # Echo an operation header, gated on both verbosity knobs.
        if self.verbose and self.echo_headers:
            self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
    def _log_something(self, data, prefix):
        # Render one chunk of traffic to the console according to the
        # echo_perline/echo_hex knobs; prefix distinguishes send from recv.
        if self.echo_perline:
            if self.echo_hex:
                self._print_hex_lines(data, prefix)
            else:
                self._print_lines(data, prefix)
        else:
            if self.echo_hex:
                if hasattr(data, 'hex'):
                    self._print_verbose(prefix + data.hex())
                else:
                    # NOTE(review): str.encode('hex') is a Python-2 codec;
                    # this branch presumably only runs on py2 — confirm.
                    self._print_verbose(prefix + data.encode('hex'))
            else:
                self._print_verbose(prefix + str(data))
    def _log_recv(self, data, yielding):
        # Console-echo received data exactly once: either when it arrives
        # from the socket (yielding=False) or when it is handed to the user
        # (yielding=True), selected by the log_yield setting.
        if yielding == self.log_yield:
            if self.verbose and self.echo_recving:
                self._log_something(data, self.echo_recv_prefix)
            if self.log_recv:
                self.log_recv.write(data)
    def _log_send(self, data):
        # Console-echo and/or file-log outgoing data.
        if self.verbose and self.echo_sending:
            self._log_something(data, self.echo_send_prefix)
        if self.log_send:
            self.log_send.write(data)
    def _print_lines(self, s, prefix):
        # Echo data one line at a time. NOTE(review): on py3, str(line) on a
        # bytes object prints the b'...' repr — presumably intentional for
        # unambiguous logging; confirm.
        for line in s.split(b'\n'):
            self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
    def settimeout(self, timeout):
        """
        Set the default timeout in seconds to use for subsequent socket
        operations
        """
        # Remember it for per-operation restore, then push it to the socket.
        self._timeout = timeout
        self._settimeout(timeout)
    def _send(self, data):
        # Low-level write: duck-type the send channel so both sockets
        # (.send) and file-like objects (.write) work. Returns the count
        # reported by the underlying call.
        if hasattr(self.sock_send, 'send'):
            return self.sock_send.send(data)
        elif hasattr(self.sock_send, 'write'):
            return self.sock_send.write(data)  # pylint: disable=no-member
        else:
            raise ValueError("I don't know how to write to this stream!")
    def _recv(self, size):
        # Low-level read: duck-type the receive stream so both sockets
        # (.recv) and file-like objects (.read) work.
        if hasattr(self.sock, 'recv'):
            return self.sock.recv(size)
        elif hasattr(self.sock, 'read'):
            return self.sock.read(size)  # pylint: disable=no-member
        else:
            raise ValueError("I don't know how to read from this stream!")
    def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
        """
        Receive until predicate returns a positive integer.
        The returned number is the size to return.

        The predicate is applied to the accumulated buffer before each read;
        on EOF the whole buffer is returned (or NetcatError raised when
        ``raise_eof`` is true). On timeout the empty string is returned and
        self.timed_out is set, unless raise_timeout was requested.
        """
        if timeout == 'default':
            timeout = self._timeout
        self.timed_out = False

        start = time.time()

        try:
            while True:
                cut_at = predicate(self.buf)
                if cut_at > 0:
                    break
                if timeout is not None:
                    # Shrink the socket timeout so the total wall-clock wait
                    # across multiple reads never exceeds `timeout`.
                    time_elapsed = time.time() - start
                    if time_elapsed > timeout:
                        raise socket.timeout
                    self._settimeout(timeout - time_elapsed)

                data = self._recv(4096)
                self._log_recv(data, False)

                self.buf += data

                if not data:
                    # EOF: either fail loudly or hand back everything buffered.
                    if raise_eof:
                        raise NetcatError("Connection dropped!")
                    cut_at = len(self.buf)
                    break
        except KeyboardInterrupt:
            self._print_header('\n======== Connection interrupted! ========')
            raise
        except socket.timeout:
            self.timed_out = True
            if self._raise_timeout:
                raise NetcatTimeout()
            return b''
        except socket.error as exc:
            raise NetcatError('Socket error: %r' % exc)

        # Restore the default timeout clobbered by the countdown above.
        self._settimeout(self._timeout)

        ret = self.buf[:cut_at]
        self.buf = self.buf[cut_at:]
        # Second logging point, used when log_yield is enabled.
        self._log_recv(ret, True)
        return ret
    def _settimeout(self, timeout):
        """
        Internal method - catches failures when working with non-timeoutable
        streams, like files
        """
        try:
            self.sock.settimeout(timeout)
        except AttributeError:
            # File-like streams have no settimeout; silently skip.
            pass
    def gettimeout(self):
        """
        Retrieve the timeout currently associated with the socket
        """
        return self._timeout
    def flush(self):
        # No-op: writes are unbuffered, but file-like callers expect flush().
        pass
    def recv(self, n=4096, timeout='default'):
        """
        Receive at most n bytes (default 4096) from the socket

        Aliases: read, get
        """
        self._print_recv_header(
            '======== Receiving {0}B{timeout_text} ========', timeout, n)

        # Return as soon as any data is buffered, capped at n bytes.
        return self._recv_predicate(lambda s: min(n, len(s)), timeout)
    def recv_until(self, s, max_size=None, timeout='default'):
        """
        Receive data from the socket until the given substring is observed.
        Data in the same datagram as the substring, following the substring,
        will not be returned and will be cached for future receives.

        Aliases: read_until, readuntil, recvuntil
        """
        self._print_recv_header(
            '======== Receiving until {0}{timeout_text} ========', timeout, repr(s))

        if max_size is None:
            max_size = 2 ** 62  # effectively unbounded

        def _predicate(buf):
            # Cut just past the delimiter, or at max_size, whichever first;
            # 0 means "keep reading".
            try:
                return min(buf.index(s) + len(s), max_size)
            except ValueError:
                return 0 if len(buf) < max_size else max_size

        return self._recv_predicate(_predicate, timeout)
    def recv_all(self, timeout='default'):
        """
        Return all data received until connection closes.

        Aliases: read_all, readall, recvall
        """
        self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)

        # Predicate never matches, so we read until EOF (raise_eof=False).
        return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
    def recv_exactly(self, n, timeout='default'):
        """
        Receive exactly n bytes

        Aliases: read_exactly, readexactly, recvexactly
        """
        self._print_recv_header(
            '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)

        return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
    def interact(self, insock=sys.stdin, outsock=sys.stdout):
        """
        Connects the socket to the terminal for user interaction.
        Alternate input and output files may be specified.

        This method cannot be used with a timeout.

        Aliases: interactive, interaction
        """
        self._print_header('======== Beginning interactive session ========')

        if hasattr(outsock, 'buffer'):
            # Text-mode stdout: write raw bytes through its binary buffer.
            outsock = outsock.buffer  # pylint: disable=no-member

        self.timed_out = False

        # Suppress per-chunk console echo for the duration of the session.
        save_verbose = self.verbose
        self.verbose = 0

        try:
            # Drain anything already buffered before entering the loop.
            if self.buf:
                outsock.write(self.buf)
                outsock.flush()
                self.buf = b''

            while True:
                # Multiplex between the user's input stream and the socket.
                readable_socks = select(self.sock, insock)
                for readable in readable_socks:
                    if readable is insock:
                        data = os.read(insock.fileno(), 4096)
                        self.send(data)
                        if not data:
                            raise NetcatError  # user closed input: end session
                    else:
                        data = self.recv(timeout=None)
                        outsock.write(data)
                        outsock.flush()
                        if not data:
                            raise NetcatError  # peer closed: end session
        except KeyboardInterrupt:
            self.verbose = save_verbose
            self._print_header('\n======== Connection interrupted! ========')
            raise
        except (socket.error, NetcatError):
            self.verbose = save_verbose
            self._print_header('\n======== Connection dropped! ========')
        finally:
            self.verbose = save_verbose
    # Delimiter used by recv_line/send_line; override per-instance if needed.
    LINE_ENDING = b'\n'

    def recv_line(self, max_size=None, timeout='default', ending=None):
        """
        Receive until the next newline , default "\\n". The newline string can
        be changed by changing ``nc.LINE_ENDING``. The newline will be returned
        as part of the string.

        Aliases: recvline, readline, read_line, readln, recvln
        """
        if ending is None:
            ending = self.LINE_ENDING
        return self.recv_until(ending, max_size, timeout)
    def send_line(self, line, ending=None):
        """
        Write the string to the wire, followed by a newline. The newline string
        can be changed by changing ``nc.LINE_ENDING``.

        Aliases: sendline, writeline, write_line, writeln, sendln
        """
        if ending is None:
            ending = self.LINE_ENDING
        return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.interact
|
python
|
def interact(self, insock=sys.stdin, outsock=sys.stdout):
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
|
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L712-L758
|
[
"def select(*args, **kwargs):\n timeout = kwargs.get('timeout', None)\n\n if len(args) == 1 and hasattr(args, '__iter__'):\n args = list(args[0])\n\n socks = flatten(args)\n\n out = []\n toselect = []\n for sock in socks:\n if type(sock) is Netcat and sock.buf:\n out.append(sock)\n else:\n toselect.append(sock)\n\n if not toselect:\n return out\n\n newgood = _select.select(toselect, [], [], 0)[0]\n\n # I really don't understand the below clause... past me what's up\n if out or newgood or timeout == 0:\n return out + newgood\n #if out or len(newgood) == len(toselect) or timeout == 0:\n # # the `out or` part is the reason we need this clause\n # return out + newgood\n\n toselect = [x for x in toselect if x not in newgood]\n out += newgood\n\n newgood = _select.select(toselect, [], [], timeout)[0]\n return out + newgood\n",
"def _print_header(self, header):\n if self.verbose and self.echo_headers:\n self._print_verbose(header)\n",
"def recv(self, n=4096, timeout='default'):\n \"\"\"\n Receive at most n bytes (default 4096) from the socket\n\n Aliases: read, get\n \"\"\"\n\n self._print_recv_header(\n '======== Receiving {0}B{timeout_text} ========', timeout, n)\n\n return self._recv_predicate(lambda s: min(n, len(s)), timeout)\n",
"def send(self, s):\n \"\"\"\n Sends all the given data to the socket.\n\n Aliases: write, put, sendall, send_all\n \"\"\"\n self._print_header('======== Sending ({0}) ========'.format(len(s)))\n\n self._log_send(s)\n out = len(s)\n\n while s:\n s = s[self._send(s):]\n return out\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
LINE_ENDING = b'\n'
def recv_line(self, max_size=None, timeout='default', ending=None):
"""
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
"""
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.recv_line
|
python
|
def recv_line(self, max_size=None, timeout='default', ending=None):
if ending is None:
ending = self.LINE_ENDING
return self.recv_until(ending, max_size, timeout)
|
Recieve until the next newline , default "\\n". The newline string can
be changed by changing ``nc.LINE_ENDING``. The newline will be returned
as part of the string.
Aliases: recvline, readline, read_line, readln, recvln
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L762-L772
|
[
"def recv_until(self, s, max_size=None, timeout='default'):\n \"\"\"\n Recieve data from the socket until the given substring is observed.\n Data in the same datagram as the substring, following the substring,\n will not be returned and will be cached for future receives.\n\n Aliases: read_until, readuntil, recvuntil\n \"\"\"\n\n self._print_recv_header(\n '======== Receiving until {0}{timeout_text} ========', timeout, repr(s))\n\n if max_size is None:\n max_size = 2 ** 62\n\n def _predicate(buf):\n try:\n return min(buf.index(s) + len(s), max_size)\n except ValueError:\n return 0 if len(buf) < max_size else max_size\n return self._recv_predicate(_predicate, timeout)\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
"""
Takes the basic version of the user args and extract as much data as
possible from target. Returns a tuple that is its arguments but
sanitized.
"""
if isinstance(target, str):
if target.startswith('nc '):
out_host = None
out_port = None
try:
opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
[])
except getopt.GetoptError as exc:
raise ValueError(exc)
for opt, arg in opts:
if opt == '-u':
udp = True
elif opt == '-4':
ipv6 = False
elif opt == '-6':
ipv6 = True
elif opt == '-l':
listen = True
elif opt == '-p':
out_port = int(arg)
else:
assert False, "unhandled option"
if not pieces:
pass
elif len(pieces) == 1:
if listen and pieces[0].isdigit():
out_port = int(pieces[0])
else:
out_host = pieces[0]
elif len(pieces) == 2 and pieces[1].isdigit():
out_host = pieces[0]
out_port = int(pieces[1])
else:
raise ValueError("Bad cmdline: %s" % target)
if out_host is None:
if listen:
out_host = '::' if ipv6 else '0.0.0.0'
else:
raise ValueError("Missing address: %s" % target)
if out_port is None:
raise ValueError("Missing port: %s" % target)
if _is_ipv6_addr(out_host):
ipv6 = True
return (out_host, out_port), listen, udp, ipv6
elif PROTOCAL_RE.match(target) is not None:
parsed = urlparse(target)
port = None
try:
scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
except KeyError:
raise ValueError("Unknown scheme: %s" % parsed.scheme)
if scheme_udp is not None:
udp = scheme_udp
if scheme_ipv6 is not None:
ipv6 = scheme_ipv6
if scheme_port is not None:
port = scheme_port
if parsed.netloc.startswith('['):
addr, extra = parsed.netloc[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
if ':' in parsed.netloc:
addr, port = parsed.netloc.split(':', 1)
port = int(port)
else:
addr = parsed.netloc
if addr is None or port is None:
raise ValueError("Can't parse addr/port from %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
else:
if target.startswith('['):
addr, extra = target[1:].split(']', 1)
if extra.startswith(':'):
port = int(extra[1:])
else:
port = None
else:
if ':' in target:
addr, port = target.split(':', 1)
port = int(port)
else:
addr = target
port = None
if port is None:
raise ValueError("No port given: %s" % target)
if _is_ipv6_addr(addr):
ipv6 = True
return (addr, port), listen, udp, ipv6
elif isinstance(target, (int, long)):
if listen:
out_port = target
else:
raise ValueError("Can't deal with number as connection address")
return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
elif isinstance(target, tuple):
if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
ipv6 = True
return target, listen, udp, ipv6
else:
raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
"""
Takes target/listen/udp/ipv6 and sets self.sock and self.peer
"""
ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
self.sock = socket.socket(fam, ty)
if listen:
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(target)
if not udp:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.sock.close()
self.sock = conn
self.peer = addr
else:
self.buf, self.peer = self.sock.recvfrom(1024)
self.sock.connect(self.peer)
self._log_recv(self.buf, False)
if self.verbose:
self._print_verbose('Connection from %s accepted' % str(self.peer))
else:
while True:
try:
self.sock.connect(target)
except (socket.gaierror, socket.herror) as exc:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
except socket.error as exc:
if retry:
time.sleep(0.2)
else:
raise NetcatError('Could not connect to %r: %r' \
% (target, exc))
else:
break
self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
"""
Send a shutdown signal for both reading and writing, or whatever
socket.SHUT_* constant you like.
Shutdown differs from closing in that it explicitly changes the state of
the socket resource to closed, whereas closing will only decrement the
number of peers on this end of the socket, since sockets can be a
resource shared by multiple peers on a single OS. When the number of
peers reaches zero, the socket is closed, but not deallocated, so you
still need to call close. (except that this is python and close is
automatically called on the deletion of the socket)
http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
"""
if self._sock_send is not None:
self._sock_send.shutdown(how)
return self.sock.shutdown(how)
def shutdown_rd(self):
"""
Send a shutdown signal for reading - you may no longer read from this
socket.
"""
if self._sock_send is not None:
self.sock.close()
else:
return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
"""
Receive until predicate returns a positive integer.
The returned number is the size to return.
"""
if timeout == 'default':
timeout = self._timeout
self.timed_out = False
start = time.time()
try:
while True:
cut_at = predicate(self.buf)
if cut_at > 0:
break
if timeout is not None:
time_elapsed = time.time() - start
if time_elapsed > timeout:
raise socket.timeout
self._settimeout(timeout - time_elapsed)
data = self._recv(4096)
self._log_recv(data, False)
self.buf += data
if not data:
if raise_eof:
raise NetcatError("Connection dropped!")
cut_at = len(self.buf)
break
except KeyboardInterrupt:
self._print_header('\n======== Connection interrupted! ========')
raise
except socket.timeout:
self.timed_out = True
if self._raise_timeout:
raise NetcatTimeout()
return b''
except socket.error as exc:
raise NetcatError('Socket error: %r' % exc)
self._settimeout(self._timeout)
ret = self.buf[:cut_at]
self.buf = self.buf[cut_at:]
self._log_recv(ret, True)
return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
"""
Connects the socket to the terminal for user interaction.
Alternate input and output files may be specified.
This method cannot be used with a timeout.
Aliases: interactive, interaction
"""
self._print_header('======== Beginning interactive session ========')
if hasattr(outsock, 'buffer'):
outsock = outsock.buffer # pylint: disable=no-member
self.timed_out = False
save_verbose = self.verbose
self.verbose = 0
try:
if self.buf:
outsock.write(self.buf)
outsock.flush()
self.buf = b''
while True:
readable_socks = select(self.sock, insock)
for readable in readable_socks:
if readable is insock:
data = os.read(insock.fileno(), 4096)
self.send(data)
if not data:
raise NetcatError
else:
data = self.recv(timeout=None)
outsock.write(data)
outsock.flush()
if not data:
raise NetcatError
except KeyboardInterrupt:
self.verbose = save_verbose
self._print_header('\n======== Connection interrupted! ========')
raise
except (socket.error, NetcatError):
self.verbose = save_verbose
self._print_header('\n======== Connection dropped! ========')
finally:
self.verbose = save_verbose
LINE_ENDING = b'\n'
def send_line(self, line, ending=None):
"""
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
"""
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
ineraction = interact
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
rhelmot/nclib
|
nclib/netcat.py
|
Netcat.send_line
|
python
|
def send_line(self, line, ending=None):
if ending is None:
ending = self.LINE_ENDING
return self.send(line + ending)
|
Write the string to the wire, followed by a newline. The newline string
can be changed by changing ``nc.LINE_ENDING``.
Aliases: sendline, writeline, write_line, writeln, sendln
|
train
|
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L774-L783
|
[
"def send(self, s):\n \"\"\"\n Sends all the given data to the socket.\n\n Aliases: write, put, sendall, send_all\n \"\"\"\n self._print_header('======== Sending ({0}) ========'.format(len(s)))\n\n self._log_send(s)\n out = len(s)\n\n while s:\n s = s[self._send(s):]\n return out\n"
] |
class Netcat(object):
"""
This is the main class you will use to interact with a peer over the
network! You may instanciate this class to either connect to a server or
listen for a one-off client.
One of the following must be passed in order to initialize a Netcat
object:
:param connect: the address/port to connect to
:param listen: the address/port to bind to for listening
:param sock: a python socket or pipe object to wrap
For ``connect`` and ``listen``, they accept basically any argument format
known to mankind. If you find an input format you think would be useful but
isn't accepted, let me know :P
Additionally, the following options modify the behavior of the object:
:param sock_send: If this is specified, this Netcat object will act
as a multiplexer/demultiplexer, using the "normal"
channel for receiving and this channel for sending.
This should be specified as a python socket or pipe
object.
.. warning:: Using ``sock_send`` will cause issues if
you pass this object into a context which
expects to be able to use its
``.fileno()``.
:param udp: Set to True to use udp connections when using the
connect or listen parameters
:param ipv6: Force using ipv6 when using the connect or listen
parameters
:param verbose: Set to True to log data sent/received. The echo_*
properties on this object can be tweaked to
describe exactly what you want logged.
:param log_send: Pass a file-like object open for writing and all
data sent over the socket will be written to it.
:param log_recv: Pass a file-like object open for writing and all
data recieved from the socket will be written to it.
:param raise_timeout:
Whether to raise a NetcatTimeout exception when a
timeout is received. The default is to return the
empty string and set self.timed_out = True
:param retry: Whether to continuously retry establishing a
connection if it fails.
:param log_yield: Control when logging messages are generated on
recv. By default, logging is done when data is
received from the socket, and may be buffered.
By setting this to true, logging is done when data
is yielded to the user, either directly from the
socket or from a buffer.
Any data that is extracted from the target address will override the
options specified here. For example, a url with the ``http:// scheme``
will go over tcp and port 80.
Some properties that may be tweaked to change the logging behavior:
- nc.echo_headers controls whether to print a header describing each
network operation before the data (True)
- nc.echo_perline controls whether the data should be split on newlines
for logging (True)
- nc.echo_sending controls whether to log data on send (True)
- nc.echo_recving controls whether to log data on recv (True)
- nc.echo_hex controls whether to log data hex-encoded (False)
- nc.echo_send_prefix controls a prefix to print before each logged
line of sent data ('>> ')
- nc.echo_recv_prefix controls a prefix to print before each logged
line of received data ('<< ')
Note that these settings ONLY affect the console logging triggered by
the verbose parameter. They don't do anything to the logging triggered
by `log_send` and `log_recv`, which are meant to provide pristine
untouched records of network traffic.
*Example 1:* Send a greeting to a UDP server listening at 192.168.3.6:8888
and log the response as hex:
>>> nc = nclib.Netcat(('192.168.3.6', 8888), udp=True, verbose=True)
>>> nc.echo_hex = True
>>> nc.send(b'\\x00\\x0dHello, world!')
======== Sending (15) ========
>> 00 0D 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 |..Hello, world! |
>>> nc.recv()
======== Receiving 4096B or until timeout (default) ========
<< 00 57 68 65 6C 6C 6F 20 66 72 69 65 6E 64 2E 20 |.Whello friend. |
<< 74 69 6D 65 20 69 73 20 73 68 6F 72 74 2E 20 70 |time is short. p|
<< 6C 65 61 73 65 20 64 6F 20 6E 6F 74 20 77 6F 72 |lease do not wor|
<< 72 79 2C 20 79 6F 75 20 77 69 6C 6C 20 66 69 6E |ry, you will fin|
<< 64 20 79 6F 75 72 20 77 61 79 2E 20 62 75 74 20 |d your way. but |
<< 64 6F 20 68 75 72 72 79 2E |do hurry. |
*Example 2:* Listen for a local TCP connection on port 1234, allow the user
to interact with the client. Log the entire interaction to log.txt.
>>> logfile = open('log.txt', 'wb')
>>> nc = nclib.Netcat(listen=('localhost', 1234), log_send=logfile, log_recv=logfile)
>>> nc.interact()
"""
def __init__(self,
connect=None,
sock=None,
listen=None,
server=None,
sock_send=None,
udp=False,
ipv6=False,
verbose=0,
log_send=None,
log_recv=None,
raise_timeout=False,
retry=False,
log_yield=False):
self.buf = b''
self.verbose = verbose
self.log_send = log_send
self.log_recv = log_recv
self.log_yield = log_yield
self.echo_headers = True
self.echo_perline = True
self.echo_sending = True
self.echo_recving = True
self.echo_hex = False
self.echo_send_prefix = '>> '
self.echo_recv_prefix = '<< '
self.sock = None
self._sock_send = sock_send
self.peer = None
# case: Netcat(host, port)
if isinstance(connect, str) and isinstance(listen, int):
connect = (connect, listen)
# case: Netcat(sock)
if isinstance(connect, socket.socket):
sock = connect
connect = None
# deprecated server kwarg
if server is not None:
connect = server
if sock is None and listen is None and connect is None:
raise ValueError('Not enough arguments, need at least an '
'address or a socket or a listening address!')
## we support passing connect as the "name" of the socket
#if sock is not None and (listen is not None or connect is not None):
# raise ValueError("connect or listen arguments may not be "
# "provided if sock is provided")
if listen is not None and connect is not None:
raise ValueError("connect and listen arguments cannot be provided at the same time")
if sock is None:
if listen is not None:
target = listen
listen = True
else:
target = connect
listen = False
target, listen, udp, ipv6 = self._parse_target(target, listen, udp, ipv6)
self._connect(target, listen, udp, ipv6, retry)
else:
self.sock = sock
self.peer = connect
try:
self._timeout = self.sock.gettimeout()
except AttributeError:
self._timeout = None
self.timed_out = False # set when an operation times out
self._raise_timeout = raise_timeout
@property
def sock_send(self):
if self._sock_send is None:
return self.sock
else:
return self._sock_send
@sock_send.setter
def sock_send(self, val):
self._sock_send = val
@staticmethod
def _parse_target(target, listen, udp, ipv6):
    """
    Takes the basic version of the user args and extract as much data as
    possible from target. Returns a tuple that is its arguments but
    sanitized.

    Accepted ``target`` spellings: an ``nc ...`` command line, a URL with
    a known scheme, a ``host:port`` / ``[v6addr]:port`` string, a bare
    port number (listen mode only), or an ``(addr, port)`` tuple.
    """
    if isinstance(target, str):
        if target.startswith('nc '):
            # netcat-style command line, e.g. "nc -l -p 1234"
            out_host = None
            out_port = None
            try:
                opts, pieces = getopt.getopt(target.split()[1:], 'u46lp:',
                                             [])
            except getopt.GetoptError as exc:
                raise ValueError(exc)
            for opt, arg in opts:
                if opt == '-u':
                    udp = True
                elif opt == '-4':
                    ipv6 = False
                elif opt == '-6':
                    ipv6 = True
                elif opt == '-l':
                    listen = True
                elif opt == '-p':
                    out_port = int(arg)
                else:
                    assert False, "unhandled option"
            # positional args: [host], [host port], or a lone port in listen mode
            if not pieces:
                pass
            elif len(pieces) == 1:
                if listen and pieces[0].isdigit():
                    out_port = int(pieces[0])
                else:
                    out_host = pieces[0]
            elif len(pieces) == 2 and pieces[1].isdigit():
                out_host = pieces[0]
                out_port = int(pieces[1])
            else:
                raise ValueError("Bad cmdline: %s" % target)
            if out_host is None:
                if listen:
                    # listening with no host means bind the wildcard address
                    out_host = '::' if ipv6 else '0.0.0.0'
                else:
                    raise ValueError("Missing address: %s" % target)
            if out_port is None:
                raise ValueError("Missing port: %s" % target)
            if _is_ipv6_addr(out_host):
                ipv6 = True
            return (out_host, out_port), listen, udp, ipv6
        elif PROTOCAL_RE.match(target) is not None:
            # URL form; scheme may pin udp/ipv6/port (see KNOWN_SCHEMES)
            parsed = urlparse(target)
            port = None
            try:
                scheme_udp, scheme_ipv6, scheme_port = KNOWN_SCHEMES[parsed.scheme]
            except KeyError:
                raise ValueError("Unknown scheme: %s" % parsed.scheme)
            if scheme_udp is not None:
                udp = scheme_udp
            if scheme_ipv6 is not None:
                ipv6 = scheme_ipv6
            if scheme_port is not None:
                port = scheme_port
            if parsed.netloc.startswith('['):
                # bracketed IPv6 literal, optionally followed by :port
                addr, extra = parsed.netloc[1:].split(']', 1)
                if extra.startswith(':'):
                    port = int(extra[1:])
            else:
                if ':' in parsed.netloc:
                    addr, port = parsed.netloc.split(':', 1)
                    port = int(port)
                else:
                    addr = parsed.netloc
            if addr is None or port is None:
                raise ValueError("Can't parse addr/port from %s" % target)
            if _is_ipv6_addr(addr):
                ipv6 = True
            return (addr, port), listen, udp, ipv6
        else:
            # plain "host:port" or "[v6addr]:port" string
            if target.startswith('['):
                addr, extra = target[1:].split(']', 1)
                if extra.startswith(':'):
                    port = int(extra[1:])
                else:
                    port = None
            else:
                if ':' in target:
                    addr, port = target.split(':', 1)
                    port = int(port)
                else:
                    addr = target
                    port = None
            if port is None:
                raise ValueError("No port given: %s" % target)
            if _is_ipv6_addr(addr):
                ipv6 = True
            return (addr, port), listen, udp, ipv6
    elif isinstance(target, (int, long)):
        # `long` is presumably a py2-compat alias defined at module level -- TODO confirm
        if listen:
            # a bare number is only meaningful as a port to listen on
            out_port = target
        else:
            raise ValueError("Can't deal with number as connection address")
        return ('::' if ipv6 else '0.0.0.0', out_port), listen, udp, ipv6
    elif isinstance(target, tuple):
        # already (addr, port); just sniff the address family
        if len(target) >= 1 and isinstance(target[0], str) and _is_ipv6_addr(target[0]):
            ipv6 = True
        return target, listen, udp, ipv6
    else:
        raise ValueError("Can't parse target: %r" % target)
def _connect(self, target, listen, udp, ipv6, retry):
    """
    Takes target/listen/udp/ipv6 and sets self.sock and self.peer.

    In listen mode the listening/bound socket is replaced by the accepted
    connection (TCP) or connected to the first datagram's sender (UDP).
    In connect mode, `retry` keeps retrying plain socket errors forever.
    """
    ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
    fam = socket.AF_INET6 if ipv6 else socket.AF_INET
    self.sock = socket.socket(fam, ty)
    if listen:
        # allow quick rebinding of a recently used address
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(target)
        if not udp:
            self.sock.listen(1)
            conn, addr = self.sock.accept()
            # the listener is no longer needed once one peer is accepted
            self.sock.close()
            self.sock = conn
            self.peer = addr
        else:
            # UDP: the first datagram identifies the peer; connect() locks
            # the socket to that sender, and the payload seeds the buffer
            self.buf, self.peer = self.sock.recvfrom(1024)
            self.sock.connect(self.peer)
            self._log_recv(self.buf, False)
        if self.verbose:
            self._print_verbose('Connection from %s accepted' % str(self.peer))
    else:
        while True:
            try:
                self.sock.connect(target)
            except (socket.gaierror, socket.herror) as exc:
                # name-resolution failures are never retried
                raise NetcatError('Could not connect to %r: %r' \
                        % (target, exc))
            except socket.error as exc:
                if retry:
                    time.sleep(0.2)
                else:
                    raise NetcatError('Could not connect to %r: %r' \
                            % (target, exc))
            else:
                break
        self.peer = target
def close(self):
"""
Close the socket.
"""
if self._sock_send is not None:
self._sock_send.close()
return self.sock.close()
# inconsistent between sockets and files. support both
@property
def closed(self):
return self._closed
@property
def _closed(self):
if hasattr(self.sock_send, 'closed'):
return self.sock_send.closed
elif hasattr(self.sock_send, '_closed'):
return self.sock_send._closed
else:
return False # ???
def shutdown(self, how=socket.SHUT_RDWR):
    """
    Send a shutdown signal for both reading and writing, or whatever
    socket.SHUT_* constant you like.

    Shutdown differs from closing in that it explicitly changes the state of
    the socket resource to closed, whereas closing will only decrement the
    number of peers on this end of the socket, since sockets can be a
    resource shared by multiple peers on a single OS. When the number of
    peers reaches zero, the socket is closed, but not deallocated, so you
    still need to call close. (except that this is python and close is
    automatically called on the deletion of the socket)

    http://stackoverflow.com/questions/409783/socket-shutdown-vs-socket-close
    """
    # with a split send stream, shut down both halves
    if self._sock_send is not None:
        self._sock_send.shutdown(how)
    return self.sock.shutdown(how)
def shutdown_rd(self):
    """
    Send a shutdown signal for reading - you may no longer read from this
    socket.
    """
    if self._sock_send is not None:
        # Split-stream mode: self.sock is the read side, so it is closed
        # outright rather than shut down (presumably because file-like
        # read streams have no shutdown() -- TODO confirm)
        self.sock.close()
    else:
        return self.shutdown(socket.SHUT_RD)
def shutdown_wr(self):
"""
Send a shutdown signal for writing - you may no longer write to this
socket.
"""
if self._sock_send is not None:
self._sock_send.close()
else:
return self.shutdown(socket.SHUT_WR)
def fileno(self):
"""
Return the file descriptor associated with this socket
"""
if self._sock_send is not None:
raise UserWarning("Calling fileno when there are in fact two filenos")
return self.sock.fileno()
def _print_verbose(self, s):
assert isinstance(s, str), "s should be str"
sys.stdout.write(s + '\n')
def _print_header(self, header):
if self.verbose and self.echo_headers:
self._print_verbose(header)
def _print_recv_header(self, fmt, timeout, *args):
if self.verbose and self.echo_headers:
if timeout == 'default':
timeout = self._timeout
if timeout is not None:
timeout_text = ' or until timeout ({0})'.format(timeout)
else:
timeout_text = ''
self._print_verbose(fmt.format(*args, timeout_text=timeout_text))
def _log_something(self, data, prefix):
if self.echo_perline:
if self.echo_hex:
self._print_hex_lines(data, prefix)
else:
self._print_lines(data, prefix)
else:
if self.echo_hex:
if hasattr(data, 'hex'):
self._print_verbose(prefix + data.hex())
else:
self._print_verbose(prefix + data.encode('hex'))
else:
self._print_verbose(prefix + str(data))
def _log_recv(self, data, yielding):
if yielding == self.log_yield:
if self.verbose and self.echo_recving:
self._log_something(data, self.echo_recv_prefix)
if self.log_recv:
self.log_recv.write(data)
def _log_send(self, data):
if self.verbose and self.echo_sending:
self._log_something(data, self.echo_send_prefix)
if self.log_send:
self.log_send.write(data)
def _print_lines(self, s, prefix):
for line in s.split(b'\n'):
self._print_verbose(prefix + str(line))
@staticmethod
def _to_spaced_hex(s):
if isinstance(s, str):
return ' '.join('%02X' % ord(a) for a in s)
if isinstance(s, bytes):
return ' '.join('%02X' % a for a in s)
raise TypeError('expected str or bytes instance')
@staticmethod
def _to_printable_str(s):
if isinstance(s, str):
return ''.join(a if ' ' <= a <= '~' else '.' for a in s)
if isinstance(s, bytes):
return ''.join(chr(a) if ord(' ') <= a <= ord('~') else '.' for a in s)
raise TypeError('expected str or bytes instance')
def _print_hex_lines(self, s, prefix):
for i in range(0, len(s), 16):
block = s[i:i+16]
spaced_hex = self._to_spaced_hex(block)
printable_str = self._to_printable_str(block)
self._print_verbose('%s%-47s |%-16s|' % (prefix, spaced_hex, printable_str))
def settimeout(self, timeout):
"""
Set the default timeout in seconds to use for subsequent socket
operations
"""
self._timeout = timeout
self._settimeout(timeout)
def _send(self, data):
if hasattr(self.sock_send, 'send'):
return self.sock_send.send(data)
elif hasattr(self.sock_send, 'write'):
return self.sock_send.write(data) # pylint: disable=no-member
else:
raise ValueError("I don't know how to write to this stream!")
def _recv(self, size):
if hasattr(self.sock, 'recv'):
return self.sock.recv(size)
elif hasattr(self.sock, 'read'):
return self.sock.read(size) # pylint: disable=no-member
else:
raise ValueError("I don't know how to read from this stream!")
def _recv_predicate(self, predicate, timeout='default', raise_eof=True):
    """
    Receive until predicate returns a positive integer.
    The returned number is the size to return.

    On timeout, either raises NetcatTimeout (if configured) or returns
    b'' with self.timed_out set; received data stays buffered in self.buf.
    """
    if timeout == 'default':
        timeout = self._timeout
    self.timed_out = False
    start = time.time()
    try:
        while True:
            cut_at = predicate(self.buf)
            if cut_at > 0:
                break
            if timeout is not None:
                # enforce an overall deadline across multiple reads
                time_elapsed = time.time() - start
                if time_elapsed > timeout:
                    raise socket.timeout
                self._settimeout(timeout - time_elapsed)
            data = self._recv(4096)
            self._log_recv(data, False)
            self.buf += data
            if not data:
                # EOF: either fail loudly or return everything buffered
                if raise_eof:
                    raise NetcatError("Connection dropped!")
                cut_at = len(self.buf)
                break
    except KeyboardInterrupt:
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except socket.timeout:
        self.timed_out = True
        if self._raise_timeout:
            raise NetcatTimeout()
        return b''
    except socket.error as exc:
        raise NetcatError('Socket error: %r' % exc)
    # restore the socket's default timeout before handing data back
    self._settimeout(self._timeout)
    ret = self.buf[:cut_at]
    self.buf = self.buf[cut_at:]
    self._log_recv(ret, True)
    return ret
def _settimeout(self, timeout):
"""
Internal method - catches failures when working with non-timeoutable
streams, like files
"""
try:
self.sock.settimeout(timeout)
except AttributeError:
pass
def gettimeout(self):
"""
Retrieve the timeout currently associated with the socket
"""
return self._timeout
def flush(self):
# no buffering
pass
def recv(self, n=4096, timeout='default'):
"""
Receive at most n bytes (default 4096) from the socket
Aliases: read, get
"""
self._print_recv_header(
'======== Receiving {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: min(n, len(s)), timeout)
def recv_until(self, s, max_size=None, timeout='default'):
"""
Recieve data from the socket until the given substring is observed.
Data in the same datagram as the substring, following the substring,
will not be returned and will be cached for future receives.
Aliases: read_until, readuntil, recvuntil
"""
self._print_recv_header(
'======== Receiving until {0}{timeout_text} ========', timeout, repr(s))
if max_size is None:
max_size = 2 ** 62
def _predicate(buf):
try:
return min(buf.index(s) + len(s), max_size)
except ValueError:
return 0 if len(buf) < max_size else max_size
return self._recv_predicate(_predicate, timeout)
def recv_all(self, timeout='default'):
"""
Return all data recieved until connection closes.
Aliases: read_all, readall, recvall
"""
self._print_recv_header('======== Receiving until close{timeout_text} ========', timeout)
return self._recv_predicate(lambda s: 0, timeout, raise_eof=False)
def recv_exactly(self, n, timeout='default'):
"""
Recieve exactly n bytes
Aliases: read_exactly, readexactly, recvexactly
"""
self._print_recv_header(
'======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
def send(self, s):
"""
Sends all the given data to the socket.
Aliases: write, put, sendall, send_all
"""
self._print_header('======== Sending ({0}) ========'.format(len(s)))
self._log_send(s)
out = len(s)
while s:
s = s[self._send(s):]
return out
def interact(self, insock=sys.stdin, outsock=sys.stdout):
    """
    Connects the socket to the terminal for user interaction.
    Alternate input and output files may be specified.

    This method cannot be used with a timeout.

    Aliases: interactive, interaction
    """
    self._print_header('======== Beginning interactive session ========')
    if hasattr(outsock, 'buffer'):
        # text-mode streams: write to the underlying binary buffer
        outsock = outsock.buffer  # pylint: disable=no-member
    self.timed_out = False
    # silence per-chunk echoing while the user drives the session
    save_verbose = self.verbose
    self.verbose = 0
    try:
        if self.buf:
            # drain anything already buffered before entering the loop
            outsock.write(self.buf)
            outsock.flush()
            self.buf = b''
        while True:
            # NOTE(review): `select` here is presumably this package's own
            # helper taking streams directly (not stdlib select.select's
            # three-list signature) -- confirm against module imports
            readable_socks = select(self.sock, insock)
            for readable in readable_socks:
                if readable is insock:
                    data = os.read(insock.fileno(), 4096)
                    self.send(data)
                    if not data:
                        # EOF on user input ends the session
                        raise NetcatError
                else:
                    data = self.recv(timeout=None)
                    outsock.write(data)
                    outsock.flush()
                    if not data:
                        # EOF on the socket ends the session
                        raise NetcatError
    except KeyboardInterrupt:
        self.verbose = save_verbose
        self._print_header('\n======== Connection interrupted! ========')
        raise
    except (socket.error, NetcatError):
        self.verbose = save_verbose
        self._print_header('\n======== Connection dropped! ========')
    finally:
        self.verbose = save_verbose
# Line delimiter searched for by recv_line(); class-level so it can be
# overridden per instance or globally.
LINE_ENDING = b'\n'

def recv_line(self, max_size=None, timeout='default', ending=None):
    """
    Receive until the next newline, default "\\n". The newline string can
    be changed by changing ``nc.LINE_ENDING``. The newline will be returned
    as part of the string.

    Aliases: recvline, readline, read_line, readln, recvln
    """
    if ending is None:
        ending = self.LINE_ENDING
    return self.recv_until(ending, max_size, timeout)
# Convenience aliases so the API matches several naming conventions
# (netcat-style, file-style, and pwntools-style names).
read = recv
get = recv
write = send
put = send
sendall = send
send_all = send
read_until = recv_until
readuntil = recv_until
recvuntil = recv_until
read_all = recv_all
readall = recv_all
recvall = recv_all
read_exactly = recv_exactly
readexactly = recv_exactly
recvexactly = recv_exactly
interactive = interact
interaction = interact  # correctly-spelled alias promised by interact()'s docstring
ineraction = interact   # historical typo, kept for backward compatibility
recvline = recv_line
readline = recv_line
read_line = recv_line
readln = recv_line
recvln = recv_line
sendline = send_line
writeline = send_line
write_line = send_line
writeln = send_line
sendln = send_line
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.name
|
python
|
def name(self):
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name
|
Get the name associated with these credentials
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L70-L73
| null |
class Credentials(rcreds.Creds):
"""GSSAPI Credentials
This class represents a set of GSSAPI credentials which may
be used with and/or returned by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
class, and thus may used with both low-level and high-level API methods.
If your implementation of GSSAPI supports the credentials import-export
extension, you may pickle and unpickle this object.
The constructor either acquires or imports a set of GSSAPI
credentials.
If the `base` argument is used, an existing
:class:`~gssapi.raw.creds.Cred` object from the low-level API is
converted into a high-level object.
If the `token` argument is used, the credentials
are imported using the token, if the credentials import-export
extension is supported (:requires-ext:`cred_imp_exp`).
Otherwise, the credentials are acquired as per the
:meth:`acquire` method.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
__slots__ = ()
def __new__(cls, base=None, token=None, name=None, lifetime=None,
mechs=None, usage='both', store=None):
# TODO(directxman12): this is missing support for password
# (non-RFC method)
if base is not None:
base_creds = base
elif token is not None:
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for importing and "
"exporting creditials")
base_creds = rcred_imp_exp.import_cred(token)
else:
res = cls.acquire(name, lifetime, mechs, usage,
store=store)
base_creds = res.creds
return super(Credentials, cls).__new__(cls, base_creds)
@property
@property
def lifetime(self):
"""Get the remaining lifetime of these credentials"""
return self.inquire(name=False, lifetime=True,
usage=False, mechs=False).lifetime
@property
def mechs(self):
"""Get the mechanisms for these credentials"""
return self.inquire(name=False, lifetime=False,
usage=False, mechs=True).mechs
@property
def usage(self):
"""Get the usage (initiate, accept, or both) of these credentials"""
return self.inquire(name=False, lifetime=False,
usage=True, mechs=False).usage
@classmethod
def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
"""Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime)
def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default)
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds)
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
"""Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError
"""
res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredResult(res_name, res.lifetime,
res.usage, res.mechs)
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage)
def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds)
def export(self):
"""Export these credentials into a token
This method exports the current credentials to a token that can
then be imported by passing the `token` argument to the constructor.
This is often used to pass credentials between processes.
:requires-ext:`cred_imp_exp`
Returns:
bytes: the exported credentials in token form
"""
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for importing and "
"exporting creditials")
return rcred_imp_exp.export_cred(self)
# pickle protocol support
def __reduce__(self):
# the unpickle arguments to new are (base=None, token=self.export())
return (type(self), (None, self.export()))
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.acquire
|
python
|
def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime)
|
Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L94-L151
| null |
class Credentials(rcreds.Creds):
"""GSSAPI Credentials
This class represents a set of GSSAPI credentials which may
be used with and/or returned by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
class, and thus may used with both low-level and high-level API methods.
If your implementation of GSSAPI supports the credentials import-export
extension, you may pickle and unpickle this object.
The constructor either acquires or imports a set of GSSAPI
credentials.
If the `base` argument is used, an existing
:class:`~gssapi.raw.creds.Cred` object from the low-level API is
converted into a high-level object.
If the `token` argument is used, the credentials
are imported using the token, if the credentials import-export
extension is supported (:requires-ext:`cred_imp_exp`).
Otherwise, the credentials are acquired as per the
:meth:`acquire` method.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
__slots__ = ()
def __new__(cls, base=None, token=None, name=None, lifetime=None,
mechs=None, usage='both', store=None):
# TODO(directxman12): this is missing support for password
# (non-RFC method)
if base is not None:
base_creds = base
elif token is not None:
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for importing and "
"exporting creditials")
base_creds = rcred_imp_exp.import_cred(token)
else:
res = cls.acquire(name, lifetime, mechs, usage,
store=store)
base_creds = res.creds
return super(Credentials, cls).__new__(cls, base_creds)
@property
def name(self):
"""Get the name associated with these credentials"""
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name
@property
def lifetime(self):
"""Get the remaining lifetime of these credentials"""
return self.inquire(name=False, lifetime=True,
usage=False, mechs=False).lifetime
@property
def mechs(self):
"""Get the mechanisms for these credentials"""
return self.inquire(name=False, lifetime=False,
usage=False, mechs=True).mechs
@property
def usage(self):
"""Get the usage (initiate, accept, or both) of these credentials"""
return self.inquire(name=False, lifetime=False,
usage=True, mechs=False).usage
@classmethod
def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default)
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds)
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
    """Inspect these credentials for information about them.

    Args:
        name (bool): fetch the name associated with the credentials
        lifetime (bool): fetch the remaining lifetime
        usage (bool): fetch the usage
        mechs (bool): fetch the associated mechanisms

    Returns:
        InquireCredResult: the requested information, with None in place
            of each field whose corresponding argument was False

    Raises:
        MissingCredentialsError
        InvalidCredentialsError
        ExpiredCredentialsError
    """
    raw = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
    # Promote the low-level name (if any) to a high-level Name object.
    wrapped_name = names.Name(raw.name) if raw.name is not None else None
    return tuples.InquireCredResult(wrapped_name, raw.lifetime,
                                    raw.usage, raw.mechs)
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
                    accept_lifetime=True, usage=True):
    """Inspect these credentials for per-mechanism information.

    Args:
        mech (OID): the mechanism for which to retrieve the information
        name (bool): fetch the name associated with the credentials
        init_lifetime (bool): fetch the remaining initiate lifetime
        accept_lifetime (bool): fetch the remaining accept lifetime
        usage (bool): fetch the usage

    Returns:
        InquireCredByMechResult: the requested information, with None in
            place of each field whose corresponding argument was False
    """
    raw = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
                                      accept_lifetime, usage)
    # Promote the low-level name (if any) to a high-level Name object.
    wrapped_name = names.Name(raw.name) if raw.name is not None else None
    return tuples.InquireCredByMechResult(wrapped_name,
                                          raw.init_lifetime,
                                          raw.accept_lifetime,
                                          raw.usage)
def add(self, name, mech, usage='both',
        init_lifetime=None, accept_lifetime=None, impersonator=None,
        store=None):
    """Acquire credentials for one mechanism and add them to this set.

    Works like :meth:`acquire`, except that the newly acquired
    single-mechanism credentials are added to a copy of the current set
    instead of forming a brand-new set.  Unlike :meth:`acquire`, `name`
    and `mech` may not be None.

    When `impersonator` is given, the new credentials impersonate `name`
    using the impersonator credentials (:requires-ext:`s4u`).  When
    `store` is given, the credentials are acquired from that credential
    store (:requires-ext:`cred_store`); otherwise the default store is
    used.  The store information is a dictionary of mechanism-specific
    keys and values pointing to a credential store or stores.  The two
    options are mutually exclusive.

    Args:
        name (Name): the name associated with the credentials
        mech (OID): the desired :class:`MechType` for the credentials
        usage (str): 'both', 'initiate', or 'accept'
        init_lifetime (int): desired initiate lifetime (None = indefinite)
        accept_lifetime (int): desired accept lifetime (None = indefinite)
        impersonator (Credentials): credentials used to impersonate
            `name`, or None for a normal acquisition (:requires-ext:`s4u`)
        store (dict): credential-store information, or None for the
            default store (:requires-ext:`cred_store`)

    Returns:
        Credentials: a set holding the current credentials plus the
        newly acquired ones

    Raises:
        BadMechanismError
        BadNameTypeError
        BadNameError
        DuplicateCredentialsElementError
        ExpiredCredentialsError
        MissingCredentialsError
    """
    if store is not None and impersonator is not None:
        raise ValueError('You cannot use both the `impersonator` and '
                         '`store` arguments at the same time')

    if store is not None:
        # Acquiring from an explicit store needs the cred_store extension.
        if rcred_cred_store is None:
            raise NotImplementedError("Your GSSAPI implementation does "
                                      "not have support for manipulating "
                                      "credential stores")
        res = rcred_cred_store.add_cred_from(
            _encode_dict(store), self, name, mech,
            usage, init_lifetime, accept_lifetime)
    elif impersonator is not None:
        # Impersonation needs the S4U extension.
        if rcred_s4u is None:
            raise NotImplementedError("Your GSSAPI implementation does "
                                      "not have support for S4U")
        res = rcred_s4u.add_cred_impersonate_name(
            self, impersonator, name, mech, usage,
            init_lifetime, accept_lifetime)
    else:
        res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
                              accept_lifetime)

    return Credentials(res.creds)
def export(self):
    """Export these credentials into an opaque token.

    The token can later be re-imported by passing it as the `token`
    argument to the constructor; this is commonly used to hand
    credentials between processes.

    :requires-ext:`cred_imp_exp`

    Returns:
        bytes: the exported credentials in token form
    """
    if rcred_imp_exp is None:
        # Fixed typo in user-facing message: "creditials" -> "credentials".
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for importing and "
                                  "exporting credentials")
    return rcred_imp_exp.export_cred(self)
# pickle protocol support
def __reduce__(self):
    """Support pickling by round-tripping through an exported token.

    Unpickling calls ``type(self)(base=None, token=<exported token>)``,
    which re-imports the credentials from :meth:`export`'s token form.
    """
    cls = type(self)
    token = self.export()
    return cls, (None, token)
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.store
|
python
|
def store(self, store=None, usage='both', mech=None,
          overwrite=False, set_default=False):
    """Store these credentials into the given store (or the default one).

    Storing into the default store requires RFC 5588 support; storing
    into an explicit store requires the credential-store extension.

    Args:
        store (dict): the store to write to, or None for the default store
        usage (str): 'both', 'initiate', or 'accept'
        mech (OID): the :class:`MechType` to associate with the stored
            credentials
        overwrite (bool): overwrite credentials already stored under the
            same name
        set_default (bool): make these the default credentials of the store

    Returns:
        StoreCredResult: the result of the storing operation
    """
    if store is None:
        # Default-store path: needs RFC 5588 support.
        if rcred_rfc5588 is None:
            raise NotImplementedError("Your GSSAPI implementation does "
                                      "not have support for RFC 5588")
        return rcred_rfc5588.store_cred(self, usage, mech,
                                        overwrite, set_default)

    # Explicit-store path: needs the cred_store extension.
    if rcred_cred_store is None:
        raise NotImplementedError("Your GSSAPI implementation does "
                                  "not have support for manipulating "
                                  "credential stores directly")
    encoded = _encode_dict(store)
    return rcred_cred_store.store_cred_into(encoded, self, usage, mech,
                                            overwrite, set_default)
|
Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L153-L203
| null |
class Credentials(rcreds.Creds):
    """GSSAPI Credentials

    This class represents a set of GSSAPI credentials which may
    be used with and/or returned by other GSSAPI methods.

    It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
    class, and thus may be used with both low-level and high-level API
    methods.

    If your implementation of GSSAPI supports the credentials import-export
    extension, you may pickle and unpickle this object.

    The constructor either acquires or imports a set of GSSAPI
    credentials.

    If the `base` argument is used, an existing
    :class:`~gssapi.raw.creds.Cred` object from the low-level API is
    converted into a high-level object.

    If the `token` argument is used, the credentials are imported using
    the token, if the credentials import-export extension is supported
    (:requires-ext:`cred_imp_exp`).

    Otherwise, the credentials are acquired as per the
    :meth:`acquire` method.

    Raises:
        BadMechanismError
        BadNameTypeError
        BadNameError
        ExpiredCredentialsError
        MissingCredentialsError
    """
    # No per-instance attributes beyond those of the low-level base class.
    __slots__ = ()

    def __new__(cls, base=None, token=None, name=None, lifetime=None,
                mechs=None, usage='both', store=None):
        # TODO(directxman12): this is missing support for password
        # (non-RFC method)
        if base is not None:
            # Wrap an existing low-level Creds object directly.
            base_creds = base
        elif token is not None:
            # Importing from a token requires the cred_imp_exp extension.
            if rcred_imp_exp is None:
                # NOTE(review): "creditials" typo kept verbatim -- this is
                # a runtime error message, not documentation.
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for importing and "
                                          "exporting creditials")
            base_creds = rcred_imp_exp.import_cred(token)
        else:
            # Fall back to a fresh acquisition.
            res = cls.acquire(name, lifetime, mechs, usage,
                              store=store)
            base_creds = res.creds
        return super(Credentials, cls).__new__(cls, base_creds)

    @property
    def name(self):
        """Get the name associated with these credentials"""
        return self.inquire(name=True, lifetime=False,
                            usage=False, mechs=False).name

    @property
    def lifetime(self):
        """Get the remaining lifetime of these credentials"""
        return self.inquire(name=False, lifetime=True,
                            usage=False, mechs=False).lifetime

    @property
    def mechs(self):
        """Get the mechanisms for these credentials"""
        return self.inquire(name=False, lifetime=False,
                            usage=False, mechs=True).mechs

    @property
    def usage(self):
        """Get the usage (initiate, accept, or both) of these credentials"""
        return self.inquire(name=False, lifetime=False,
                            usage=True, mechs=False).usage

    @classmethod
    def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
                store=None):
        """Acquire GSSAPI credentials

        This method acquires credentials.  If the `store` argument is
        used, the credentials will be acquired from the given
        credential store (if supported).  Otherwise, the credentials are
        acquired from the default store.

        The credential store information is a dictionary containing
        mechanisms-specific keys and values pointing to a credential store
        or stores.

        Using a non-default store requires support for the credentials
        store extension.

        Args:
            name (Name): the name associated with the credentials,
                or None for the default name
            lifetime (int): the desired lifetime of the credentials, or
                None for indefinite
            mechs (list): the desired :class:`MechType` OIDs to be used
                with the credentials, or None for the default set
            usage (str): the usage for the credentials -- either 'both',
                'initiate', or 'accept'
            store (dict): the credential store information pointing to the
                credential store from which to acquire the credentials,
                or None for the default store (:requires-ext:`cred_store`)

        Returns:
            AcquireCredResult: the acquired credentials and information
            about them

        Raises:
            BadMechanismError
            BadNameTypeError
            BadNameError
            ExpiredCredentialsError
            MissingCredentialsError
        """
        if store is None:
            res = rcreds.acquire_cred(name, lifetime,
                                      mechs, usage)
        else:
            # Acquiring from an explicit store needs the cred_store
            # extension.
            if rcred_cred_store is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for manipulating "
                                          "credential stores")
            store = _encode_dict(store)
            res = rcred_cred_store.acquire_cred_from(store, name,
                                                     lifetime, mechs,
                                                     usage)
        return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
                                        res.lifetime)

    def impersonate(self, name=None, lifetime=None, mechs=None,
                    usage='initiate'):
        """Impersonate a name using the current credentials

        This method acquires credentials by impersonating another
        name using the current credentials.

        :requires-ext:`s4u`

        Args:
            name (Name): the name to impersonate
            lifetime (int): the desired lifetime of the new credentials,
                or None for indefinite
            mechs (list): the desired :class:`MechType` OIDs for the new
                credentials
            usage (str): the desired usage for the new credentials --
                either 'both', 'initiate', or 'accept'.  Note that some
                mechanisms may only support 'initiate'.

        Returns:
            Credentials: the new credentials impersonating the given name
        """
        if rcred_s4u is None:
            raise NotImplementedError("Your GSSAPI implementation does not "
                                      "have support for S4U")
        res = rcred_s4u.acquire_cred_impersonate_name(self, name,
                                                      lifetime, mechs,
                                                      usage)
        return type(self)(base=res.creds)

    def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
        """Inspect these credentials for information

        This method inspects these credentials for information about them.

        Args:
            name (bool): get the name associated with the credentials
            lifetime (bool): get the remaining lifetime for the credentials
            usage (bool): get the usage for the credentials
            mechs (bool): get the mechanisms associated with the credentials

        Returns:
            InquireCredResult: the information about the credentials,
                with None used when the corresponding argument was False

        Raises:
            MissingCredentialsError
            InvalidCredentialsError
            ExpiredCredentialsError
        """
        res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
        # Promote the low-level name (if present) to a high-level Name.
        if res.name is not None:
            res_name = names.Name(res.name)
        else:
            res_name = None
        return tuples.InquireCredResult(res_name, res.lifetime,
                                        res.usage, res.mechs)

    def inquire_by_mech(self, mech, name=True, init_lifetime=True,
                        accept_lifetime=True, usage=True):
        """Inspect these credentials for per-mechanism information

        This method inspects these credentials for per-mechanism
        information about them.

        Args:
            mech (OID): the mechanism for which to retrieve the information
            name (bool): get the name associated with the credentials
            init_lifetime (bool): get the remaining initiate lifetime for
                the credentials
            accept_lifetime (bool): get the remaining accept lifetime for
                the credentials
            usage (bool): get the usage for the credentials

        Returns:
            InquireCredByMechResult: the information about the credentials,
                with None used when the corresponding argument was False
        """
        res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
                                          accept_lifetime, usage)
        # Promote the low-level name (if present) to a high-level Name.
        if res.name is not None:
            res_name = names.Name(res.name)
        else:
            res_name = None
        return tuples.InquireCredByMechResult(res_name,
                                              res.init_lifetime,
                                              res.accept_lifetime,
                                              res.usage)

    def add(self, name, mech, usage='both',
            init_lifetime=None, accept_lifetime=None, impersonator=None,
            store=None):
        """Acquire more credentials to add to the current set

        This method works like :meth:`acquire`, except that it adds the
        acquired credentials for a single mechanism to a copy of the
        current set, instead of creating a new set for multiple
        mechanisms.  Unlike :meth:`acquire`, you cannot pass None for the
        desired name or mechanism.

        If the `impersonator` argument is used, the credentials will
        impersonate the given name using the impersonator credentials
        (:requires-ext:`s4u`).

        If the `store` argument is used, the credentials will be acquired
        from the given credential store (:requires-ext:`cred_store`).
        Otherwise, the credentials are acquired from the default store.

        The credential store information is a dictionary containing
        mechanisms-specific keys and values pointing to a credential store
        or stores.

        Note that the `store` argument is not compatible with the
        `impersonator` argument.

        Args:
            name (Name): the name associated with the credentials
            mech (OID): the desired :class:`MechType` to be used with the
                credentials
            usage (str): the usage for the credentials -- either 'both',
                'initiate', or 'accept'
            init_lifetime (int): the desired initiate lifetime of the
                credentials, or None for indefinite
            accept_lifetime (int): the desired accept lifetime of the
                credentials, or None for indefinite
            impersonator (Credentials): the credentials to use to
                impersonate the given name, or None to acquire normally
                (:requires-ext:`s4u`)
            store (dict): the credential store information pointing to the
                credential store from which to acquire the credentials,
                or None for the default store (:requires-ext:`cred_store`)

        Returns:
            Credentials: the credentials set containing the current
            credentials and the newly acquired ones.

        Raises:
            BadMechanismError
            BadNameTypeError
            BadNameError
            DuplicateCredentialsElementError
            ExpiredCredentialsError
            MissingCredentialsError
        """
        # `store` and `impersonator` select mutually exclusive acquisition
        # paths.
        if store is not None and impersonator is not None:
            raise ValueError('You cannot use both the `impersonator` and '
                             '`store` arguments at the same time')
        if store is not None:
            if rcred_cred_store is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for manipulating "
                                          "credential stores")
            store = _encode_dict(store)
            res = rcred_cred_store.add_cred_from(store, self, name, mech,
                                                 usage, init_lifetime,
                                                 accept_lifetime)
        elif impersonator is not None:
            if rcred_s4u is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for S4U")
            res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
                                                      name, mech, usage,
                                                      init_lifetime,
                                                      accept_lifetime)
        else:
            res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
                                  accept_lifetime)
        return Credentials(res.creds)

    def export(self):
        """Export these credentials into a token

        This method exports the current credentials to a token that can
        then be imported by passing the `token` argument to the
        constructor.  This is often used to pass credentials between
        processes.

        :requires-ext:`cred_imp_exp`

        Returns:
            bytes: the exported credentials in token form
        """
        if rcred_imp_exp is None:
            # NOTE(review): "creditials" typo kept verbatim -- this is a
            # runtime error message, not documentation.
            raise NotImplementedError("Your GSSAPI implementation does not "
                                      "have support for importing and "
                                      "exporting creditials")
        return rcred_imp_exp.export_cred(self)

    # pickle protocol support
    def __reduce__(self):
        # the unpickle arguments to new are (base=None, token=self.export())
        return (type(self), (None, self.export()))
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.impersonate
|
python
|
def impersonate(self, name=None, lifetime=None, mechs=None,
                usage='initiate'):
    """Acquire credentials impersonating *name* (S4U extension required)."""
    if rcred_s4u is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for S4U")
    acquired = rcred_s4u.acquire_cred_impersonate_name(
        self, name, lifetime, mechs, usage)
    # Preserve the caller's (sub)class when wrapping the raw creds.
    return type(self)(base=acquired.creds)
|
Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L205-L236
| null |
class Credentials(rcreds.Creds):
    """GSSAPI Credentials

    This class represents a set of GSSAPI credentials which may
    be used with and/or returned by other GSSAPI methods.

    It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
    class, and thus may be used with both low-level and high-level API
    methods.

    If your implementation of GSSAPI supports the credentials import-export
    extension, you may pickle and unpickle this object.

    The constructor either acquires or imports a set of GSSAPI
    credentials.

    If the `base` argument is used, an existing
    :class:`~gssapi.raw.creds.Cred` object from the low-level API is
    converted into a high-level object.

    If the `token` argument is used, the credentials are imported using
    the token, if the credentials import-export extension is supported
    (:requires-ext:`cred_imp_exp`).

    Otherwise, the credentials are acquired as per the
    :meth:`acquire` method.

    Raises:
        BadMechanismError
        BadNameTypeError
        BadNameError
        ExpiredCredentialsError
        MissingCredentialsError
    """
    # No per-instance attributes beyond those of the low-level base class.
    __slots__ = ()

    def __new__(cls, base=None, token=None, name=None, lifetime=None,
                mechs=None, usage='both', store=None):
        # TODO(directxman12): this is missing support for password
        # (non-RFC method)
        if base is not None:
            # Wrap an existing low-level Creds object directly.
            base_creds = base
        elif token is not None:
            # Importing from a token requires the cred_imp_exp extension.
            if rcred_imp_exp is None:
                # NOTE(review): "creditials" typo kept verbatim -- this is
                # a runtime error message, not documentation.
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for importing and "
                                          "exporting creditials")
            base_creds = rcred_imp_exp.import_cred(token)
        else:
            # Fall back to a fresh acquisition.
            res = cls.acquire(name, lifetime, mechs, usage,
                              store=store)
            base_creds = res.creds
        return super(Credentials, cls).__new__(cls, base_creds)

    @property
    def name(self):
        """Get the name associated with these credentials"""
        return self.inquire(name=True, lifetime=False,
                            usage=False, mechs=False).name

    @property
    def lifetime(self):
        """Get the remaining lifetime of these credentials"""
        return self.inquire(name=False, lifetime=True,
                            usage=False, mechs=False).lifetime

    @property
    def mechs(self):
        """Get the mechanisms for these credentials"""
        return self.inquire(name=False, lifetime=False,
                            usage=False, mechs=True).mechs

    @property
    def usage(self):
        """Get the usage (initiate, accept, or both) of these credentials"""
        return self.inquire(name=False, lifetime=False,
                            usage=True, mechs=False).usage

    @classmethod
    def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
                store=None):
        """Acquire GSSAPI credentials

        This method acquires credentials.  If the `store` argument is
        used, the credentials will be acquired from the given
        credential store (if supported).  Otherwise, the credentials are
        acquired from the default store.

        The credential store information is a dictionary containing
        mechanisms-specific keys and values pointing to a credential store
        or stores.

        Using a non-default store requires support for the credentials
        store extension.

        Args:
            name (Name): the name associated with the credentials,
                or None for the default name
            lifetime (int): the desired lifetime of the credentials, or
                None for indefinite
            mechs (list): the desired :class:`MechType` OIDs to be used
                with the credentials, or None for the default set
            usage (str): the usage for the credentials -- either 'both',
                'initiate', or 'accept'
            store (dict): the credential store information pointing to the
                credential store from which to acquire the credentials,
                or None for the default store (:requires-ext:`cred_store`)

        Returns:
            AcquireCredResult: the acquired credentials and information
            about them

        Raises:
            BadMechanismError
            BadNameTypeError
            BadNameError
            ExpiredCredentialsError
            MissingCredentialsError
        """
        if store is None:
            res = rcreds.acquire_cred(name, lifetime,
                                      mechs, usage)
        else:
            # Acquiring from an explicit store needs the cred_store
            # extension.
            if rcred_cred_store is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for manipulating "
                                          "credential stores")
            store = _encode_dict(store)
            res = rcred_cred_store.acquire_cred_from(store, name,
                                                     lifetime, mechs,
                                                     usage)
        return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
                                        res.lifetime)

    def store(self, store=None, usage='both', mech=None,
              overwrite=False, set_default=False):
        """Store these credentials into the given store

        This method stores the current credentials into the specified
        credentials store.  If the default store is used, support for
        :rfc:`5588` is required.  Otherwise, support for the credentials
        store extension is required.

        :requires-ext:`rfc5588` or :requires-ext:`cred_store`

        Args:
            store (dict): the store into which to store the credentials,
                or None for the default store.
            usage (str): the usage to store the credentials with -- either
                'both', 'initiate', or 'accept'
            mech (OID): the :class:`MechType` to associate with the
                stored credentials
            overwrite (bool): whether or not to overwrite existing
                credentials stored with the same name, etc
            set_default (bool): whether or not to set these credentials as
                the default credentials for the given store.

        Returns:
            StoreCredResult: the results of the credential storing
            operation

        Raises:
            GSSError
            ExpiredCredentialsError
            MissingCredentialsError
            OperationUnavailableError
            DuplicateCredentialsElementError
        """
        if store is None:
            # Default-store path: needs RFC 5588 support.
            if rcred_rfc5588 is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for RFC 5588")
            return rcred_rfc5588.store_cred(self, usage, mech,
                                            overwrite, set_default)
        else:
            # Explicit-store path: needs the cred_store extension.
            if rcred_cred_store is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for manipulating "
                                          "credential stores directly")
            store = _encode_dict(store)
            return rcred_cred_store.store_cred_into(store, self, usage, mech,
                                                    overwrite, set_default)

    def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
        """Inspect these credentials for information

        This method inspects these credentials for information about them.

        Args:
            name (bool): get the name associated with the credentials
            lifetime (bool): get the remaining lifetime for the credentials
            usage (bool): get the usage for the credentials
            mechs (bool): get the mechanisms associated with the credentials

        Returns:
            InquireCredResult: the information about the credentials,
                with None used when the corresponding argument was False

        Raises:
            MissingCredentialsError
            InvalidCredentialsError
            ExpiredCredentialsError
        """
        res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
        # Promote the low-level name (if present) to a high-level Name.
        if res.name is not None:
            res_name = names.Name(res.name)
        else:
            res_name = None
        return tuples.InquireCredResult(res_name, res.lifetime,
                                        res.usage, res.mechs)

    def inquire_by_mech(self, mech, name=True, init_lifetime=True,
                        accept_lifetime=True, usage=True):
        """Inspect these credentials for per-mechanism information

        This method inspects these credentials for per-mechanism
        information about them.

        Args:
            mech (OID): the mechanism for which to retrieve the information
            name (bool): get the name associated with the credentials
            init_lifetime (bool): get the remaining initiate lifetime for
                the credentials
            accept_lifetime (bool): get the remaining accept lifetime for
                the credentials
            usage (bool): get the usage for the credentials

        Returns:
            InquireCredByMechResult: the information about the credentials,
                with None used when the corresponding argument was False
        """
        res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
                                          accept_lifetime, usage)
        # Promote the low-level name (if present) to a high-level Name.
        if res.name is not None:
            res_name = names.Name(res.name)
        else:
            res_name = None
        return tuples.InquireCredByMechResult(res_name,
                                              res.init_lifetime,
                                              res.accept_lifetime,
                                              res.usage)

    def add(self, name, mech, usage='both',
            init_lifetime=None, accept_lifetime=None, impersonator=None,
            store=None):
        """Acquire more credentials to add to the current set

        This method works like :meth:`acquire`, except that it adds the
        acquired credentials for a single mechanism to a copy of the
        current set, instead of creating a new set for multiple
        mechanisms.  Unlike :meth:`acquire`, you cannot pass None for the
        desired name or mechanism.

        If the `impersonator` argument is used, the credentials will
        impersonate the given name using the impersonator credentials
        (:requires-ext:`s4u`).

        If the `store` argument is used, the credentials will be acquired
        from the given credential store (:requires-ext:`cred_store`).
        Otherwise, the credentials are acquired from the default store.

        The credential store information is a dictionary containing
        mechanisms-specific keys and values pointing to a credential store
        or stores.

        Note that the `store` argument is not compatible with the
        `impersonator` argument.

        Args:
            name (Name): the name associated with the credentials
            mech (OID): the desired :class:`MechType` to be used with the
                credentials
            usage (str): the usage for the credentials -- either 'both',
                'initiate', or 'accept'
            init_lifetime (int): the desired initiate lifetime of the
                credentials, or None for indefinite
            accept_lifetime (int): the desired accept lifetime of the
                credentials, or None for indefinite
            impersonator (Credentials): the credentials to use to
                impersonate the given name, or None to acquire normally
                (:requires-ext:`s4u`)
            store (dict): the credential store information pointing to the
                credential store from which to acquire the credentials,
                or None for the default store (:requires-ext:`cred_store`)

        Returns:
            Credentials: the credentials set containing the current
            credentials and the newly acquired ones.

        Raises:
            BadMechanismError
            BadNameTypeError
            BadNameError
            DuplicateCredentialsElementError
            ExpiredCredentialsError
            MissingCredentialsError
        """
        # `store` and `impersonator` select mutually exclusive acquisition
        # paths.
        if store is not None and impersonator is not None:
            raise ValueError('You cannot use both the `impersonator` and '
                             '`store` arguments at the same time')
        if store is not None:
            if rcred_cred_store is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for manipulating "
                                          "credential stores")
            store = _encode_dict(store)
            res = rcred_cred_store.add_cred_from(store, self, name, mech,
                                                 usage, init_lifetime,
                                                 accept_lifetime)
        elif impersonator is not None:
            if rcred_s4u is None:
                raise NotImplementedError("Your GSSAPI implementation does "
                                          "not have support for S4U")
            res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
                                                      name, mech, usage,
                                                      init_lifetime,
                                                      accept_lifetime)
        else:
            res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
                                  accept_lifetime)
        return Credentials(res.creds)

    def export(self):
        """Export these credentials into a token

        This method exports the current credentials to a token that can
        then be imported by passing the `token` argument to the
        constructor.  This is often used to pass credentials between
        processes.

        :requires-ext:`cred_imp_exp`

        Returns:
            bytes: the exported credentials in token form
        """
        if rcred_imp_exp is None:
            # NOTE(review): "creditials" typo kept verbatim -- this is a
            # runtime error message, not documentation.
            raise NotImplementedError("Your GSSAPI implementation does not "
                                      "have support for importing and "
                                      "exporting creditials")
        return rcred_imp_exp.export_cred(self)

    # pickle protocol support
    def __reduce__(self):
        # the unpickle arguments to new are (base=None, token=self.export())
        return (type(self), (None, self.export()))
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.inquire
|
python
|
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
    """Return an InquireCredResult describing these credentials.

    Each boolean argument selects whether the corresponding field is
    fetched; unselected fields come back as None.
    """
    raw = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
    wrapped_name = names.Name(raw.name) if raw.name is not None else None
    return tuples.InquireCredResult(wrapped_name, raw.lifetime,
                                    raw.usage, raw.mechs)
|
Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L238-L267
| null |
class Credentials(rcreds.Creds):
"""GSSAPI Credentials
This class represents a set of GSSAPI credentials which may
be used with and/or returned by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
class, and thus may used with both low-level and high-level API methods.
If your implementation of GSSAPI supports the credentials import-export
extension, you may pickle and unpickle this object.
The constructor either acquires or imports a set of GSSAPI
credentials.
If the `base` argument is used, an existing
:class:`~gssapi.raw.creds.Cred` object from the low-level API is
converted into a high-level object.
If the `token` argument is used, the credentials
are imported using the token, if the credentials import-export
extension is supported (:requires-ext:`cred_imp_exp`).
Otherwise, the credentials are acquired as per the
:meth:`acquire` method.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
__slots__ = ()
def __new__(cls, base=None, token=None, name=None, lifetime=None,
mechs=None, usage='both', store=None):
# TODO(directxman12): this is missing support for password
# (non-RFC method)
if base is not None:
base_creds = base
elif token is not None:
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for importing and "
"exporting creditials")
base_creds = rcred_imp_exp.import_cred(token)
else:
res = cls.acquire(name, lifetime, mechs, usage,
store=store)
base_creds = res.creds
return super(Credentials, cls).__new__(cls, base_creds)
@property
def name(self):
"""Get the name associated with these credentials"""
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name
@property
def lifetime(self):
"""Get the remaining lifetime of these credentials"""
return self.inquire(name=False, lifetime=True,
usage=False, mechs=False).lifetime
@property
def mechs(self):
"""Get the mechanisms for these credentials"""
return self.inquire(name=False, lifetime=False,
usage=False, mechs=True).mechs
@property
def usage(self):
"""Get the usage (initiate, accept, or both) of these credentials"""
return self.inquire(name=False, lifetime=False,
usage=True, mechs=False).usage
@classmethod
def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
"""Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime)
def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default)
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds)
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage)
def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds)
def export(self):
"""Export these credentials into a token
This method exports the current credentials to a token that can
then be imported by passing the `token` argument to the constructor.
This is often used to pass credentials between processes.
:requires-ext:`cred_imp_exp`
Returns:
bytes: the exported credentials in token form
"""
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for importing and "
"exporting creditials")
return rcred_imp_exp.export_cred(self)
# pickle protocol support
def __reduce__(self):
# the unpickle arguments to new are (base=None, token=self.export())
return (type(self), (None, self.export()))
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.inquire_by_mech
|
python
|
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage)
|
Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L269-L301
| null |
class Credentials(rcreds.Creds):
"""GSSAPI Credentials
This class represents a set of GSSAPI credentials which may
be used with and/or returned by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
class, and thus may used with both low-level and high-level API methods.
If your implementation of GSSAPI supports the credentials import-export
extension, you may pickle and unpickle this object.
The constructor either acquires or imports a set of GSSAPI
credentials.
If the `base` argument is used, an existing
:class:`~gssapi.raw.creds.Cred` object from the low-level API is
converted into a high-level object.
If the `token` argument is used, the credentials
are imported using the token, if the credentials import-export
extension is supported (:requires-ext:`cred_imp_exp`).
Otherwise, the credentials are acquired as per the
:meth:`acquire` method.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
__slots__ = ()
def __new__(cls, base=None, token=None, name=None, lifetime=None,
mechs=None, usage='both', store=None):
# TODO(directxman12): this is missing support for password
# (non-RFC method)
if base is not None:
base_creds = base
elif token is not None:
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for importing and "
"exporting creditials")
base_creds = rcred_imp_exp.import_cred(token)
else:
res = cls.acquire(name, lifetime, mechs, usage,
store=store)
base_creds = res.creds
return super(Credentials, cls).__new__(cls, base_creds)
@property
def name(self):
"""Get the name associated with these credentials"""
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name
@property
def lifetime(self):
"""Get the remaining lifetime of these credentials"""
return self.inquire(name=False, lifetime=True,
usage=False, mechs=False).lifetime
@property
def mechs(self):
"""Get the mechanisms for these credentials"""
return self.inquire(name=False, lifetime=False,
usage=False, mechs=True).mechs
@property
def usage(self):
"""Get the usage (initiate, accept, or both) of these credentials"""
return self.inquire(name=False, lifetime=False,
usage=True, mechs=False).usage
@classmethod
def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
"""Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime)
def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default)
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds)
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
"""Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError
"""
res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredResult(res_name, res.lifetime,
res.usage, res.mechs)
def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds)
def export(self):
"""Export these credentials into a token
This method exports the current credentials to a token that can
then be imported by passing the `token` argument to the constructor.
This is often used to pass credentials between processes.
:requires-ext:`cred_imp_exp`
Returns:
bytes: the exported credentials in token form
"""
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for importing and "
"exporting creditials")
return rcred_imp_exp.export_cred(self)
# pickle protocol support
def __reduce__(self):
# the unpickle arguments to new are (base=None, token=self.export())
return (type(self), (None, self.export()))
|
pythongssapi/python-gssapi
|
gssapi/creds.py
|
Credentials.add
|
python
|
def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds)
|
Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L303-L386
| null |
class Credentials(rcreds.Creds):
"""GSSAPI Credentials
This class represents a set of GSSAPI credentials which may
be used with and/or returned by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.creds.Creds`
class, and thus may used with both low-level and high-level API methods.
If your implementation of GSSAPI supports the credentials import-export
extension, you may pickle and unpickle this object.
The constructor either acquires or imports a set of GSSAPI
credentials.
If the `base` argument is used, an existing
:class:`~gssapi.raw.creds.Cred` object from the low-level API is
converted into a high-level object.
If the `token` argument is used, the credentials
are imported using the token, if the credentials import-export
extension is supported (:requires-ext:`cred_imp_exp`).
Otherwise, the credentials are acquired as per the
:meth:`acquire` method.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
__slots__ = ()
def __new__(cls, base=None, token=None, name=None, lifetime=None,
mechs=None, usage='both', store=None):
# TODO(directxman12): this is missing support for password
# (non-RFC method)
if base is not None:
base_creds = base
elif token is not None:
if rcred_imp_exp is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for importing and "
"exporting creditials")
base_creds = rcred_imp_exp.import_cred(token)
else:
res = cls.acquire(name, lifetime, mechs, usage,
store=store)
base_creds = res.creds
return super(Credentials, cls).__new__(cls, base_creds)
@property
def name(self):
"""Get the name associated with these credentials"""
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name
@property
def lifetime(self):
"""Get the remaining lifetime of these credentials"""
return self.inquire(name=False, lifetime=True,
usage=False, mechs=False).lifetime
@property
def mechs(self):
"""Get the mechanisms for these credentials"""
return self.inquire(name=False, lifetime=False,
usage=False, mechs=True).mechs
@property
def usage(self):
"""Get the usage (initiate, accept, or both) of these credentials"""
return self.inquire(name=False, lifetime=False,
usage=True, mechs=False).usage
@classmethod
def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
"""Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime)
def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default)
def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds)
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
"""Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError
"""
res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredResult(res_name, res.lifetime,
res.usage, res.mechs)
def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage)
def export(self):
    """Export these credentials into a token.

    This method exports the current credentials to a token that can
    then be imported by passing the `token` argument to the constructor.
    This is often used to pass credentials between processes.

    :requires-ext:`cred_imp_exp`

    Returns:
        bytes: the exported credentials in token form

    Raises:
        NotImplementedError: if the underlying GSSAPI implementation
            lacks the optional cred_imp_exp extension
    """
    if rcred_imp_exp is None:
        # Fixed typo in the user-facing message: "creditials" -> "credentials".
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for importing and "
                                  "exporting credentials")
    return rcred_imp_exp.export_cred(self)
# pickle protocol support
def __reduce__(self):
    """Support pickling by round-tripping through an exported token."""
    # Unpickling reconstructs via type(self)(base=None, token=<token>).
    token = self.export()
    return (type(self), (None, token))
|
pythongssapi/python-gssapi
|
gssapi/names.py
|
Name.display_as
|
python
|
def display_as(self, name_type):
    """Display this name using the syntax of the given name type.

    :requires-ext:`rfc6680`

    Args:
        name_type (OID): the :class:`NameType` to use to display the
            given name

    Returns:
        str: the displayed name

    Raises:
        OperationUnavailableError
        NotImplementedError: if the underlying GSSAPI implementation
            lacks RFC 6680 support
    """
    if rname_rfc6680 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "support RFC 6680 (the GSSAPI naming "
                                  "extensions)")
    # Decode the raw displayed bytes using the library's current encoding.
    raw = rname_rfc6680.display_name_ext(self, name_type)
    return raw.decode(_utils._get_encoding())
|
Display this name as the given name type.
This method attempts to display the current :class:`Name`
using the syntax of the given :class:`NameType`, if possible.
Warning:
In MIT krb5 versions below 1.13.3, this method can segfault if
the name was not *originally* created with a `name_type` that was
not ``None`` (even in cases when a ``name_type``
is later "added", such as via :meth:`canonicalize`).
**Do not use this method unless you are sure the above
conditions can never happen in your code.**
Warning:
In addition to the above warning, current versions of MIT krb5 do
not actually fully implement this method, and it may return
incorrect results in the case of canonicalized names.
:requires-ext:`rfc6680`
Args:
name_type (OID): the :class:`NameType` to use to display the given
name
Returns:
str: the displayed name
Raises:
OperationUnavailableError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/names.py#L125-L165
| null |
class Name(rname.Name):
"""A GSSAPI Name
This class represents a GSSAPI name which may be used with and/or returned
by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.names.Name`
class, and thus may used with both low-level and high-level API methods.
This class may be pickled and unpickled, as well as copied.
The :func:`str` and :func:`bytes` methods may be used to retrieve the
text of the name.
Note:
Name strings will be automatically converted to and from unicode
strings as appropriate. If a method is listed as returning a
:class:`str` object, it will return a unicode string.
The encoding used will be python-gssapi's current encoding, which
defaults to UTF-8.
"""
__slots__ = ('_attr_obj')
def __new__(cls, base=None, name_type=None, token=None,
composite=False):
if token is not None:
if composite:
if rname_rfc6680 is None:
raise NotImplementedError(
"Your GSSAPI implementation does not support RFC 6680 "
"(the GSSAPI naming extensions)")
if rname_rfc6680_comp_oid is not None:
base_name = rname.import_name(token,
NameType.composite_export)
displ_name = rname.display_name(base_name, name_type=True)
if displ_name.name_type == NameType.composite_export:
# NB(directxman12): there's a bug in MIT krb5 <= 1.13
# where GSS_C_NT_COMPOSITE_EXPORT doesn't trigger
# immediate import logic. However, we can just use
# the normal GSS_C_NT_EXPORT_NAME in this case.
base_name = rname.import_name(token, NameType.export)
else:
# NB(directxman12): some older versions of MIT krb5 don't
# have support for the GSS_C_NT_COMPOSITE_EXPORT, but do
# support composite tokens via GSS_C_NT_EXPORT_NAME.
base_name = rname.import_name(token, NameType.export)
else:
base_name = rname.import_name(token, NameType.export)
elif isinstance(base, rname.Name):
base_name = base
else:
if isinstance(base, six.text_type):
base = base.encode(_utils._get_encoding())
base_name = rname.import_name(base, name_type)
return super(Name, cls).__new__(cls, base_name)
def __init__(self, base=None, name_type=None, token=None, composite=False):
"""
The constructor can be used to "import" a name from a human readable
representation, or from a token, and can also be used to convert a
low-level :class:`gssapi.raw.names.Name` object into a high-level
object.
If a :class:`~gssapi.raw.names.Name` object from the low-level API
is passed as the `base` argument, it will be converted into a
high-level object.
If the `token` argument is used, the name will be imported using
the token. If the token was exported as a composite token,
pass `composite=True`.
Otherwise, a new name will be created, using the `base` argument as
the human-readable string and the `name_type` argument to denote the
name type.
Raises:
BadNameTypeError
BadNameError
BadMechanismError
"""
if rname_rfc6680 is not None:
self._attr_obj = _NameAttributeMapping(self)
else:
self._attr_obj = None
def __str__(self):
if issubclass(str, six.text_type):
# Python 3 -- we should return unicode
return bytes(self).decode(_utils._get_encoding())
else:
# Python 2 -- we should return a string
return self.__bytes__()
def __unicode__(self):
# Python 2 -- someone asked for unicode
return self.__bytes__().decode(_utils._get_encoding())
def __bytes__(self):
# Python 3 -- someone asked for bytes
return rname.display_name(self, name_type=False).name
@property
def name_type(self):
"""The :class:`NameType` of this name"""
return rname.display_name(self, name_type=True).name_type
def __eq__(self, other):
if not isinstance(other, rname.Name):
# maybe something else can compare this
# to other classes, but we certainly can't
return NotImplemented
else:
return rname.compare_name(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
disp_res = rname.display_name(self, name_type=True)
return "Name({name}, {name_type})".format(name=disp_res.name,
name_type=disp_res.name_type)
def export(self, composite=False):
"""Export this name as a token.
This method exports the name into a byte string which can then be
imported by using the `token` argument of the constructor.
Args:
composite (bool): whether or not use to a composite token --
:requires-ext:`rfc6680`
Returns:
bytes: the exported name in token form
Raises:
MechanismNameRequiredError
BadNameTypeError
BadNameError
"""
if composite:
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not support RFC 6680 (the GSSAPI "
"naming extensions)")
return rname_rfc6680.export_name_composite(self)
else:
return rname.export_name(self)
def canonicalize(self, mech):
"""Canonicalize a name with respect to a mechanism.
This method returns a new :class:`Name` that is canonicalized according
to the given mechanism.
Args:
mech (OID): the :class:`MechType` to use
Returns:
Name: the canonicalized name
Raises:
BadMechanismError
BadNameTypeError
BadNameError
"""
return type(self)(rname.canonicalize_name(self, mech))
def __copy__(self):
return type(self)(rname.duplicate_name(self))
def __deepcopy__(self, memo):
return type(self)(rname.duplicate_name(self))
def _inquire(self, **kwargs):
"""Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
if not kwargs:
default_val = True
else:
default_val = False
attrs = kwargs.get('attrs', default_val)
mech_name = kwargs.get('mech_name', default_val)
return rname_rfc6680.inquire_name(self, mech_name=mech_name,
attrs=attrs)
@property
def is_mech_name(self):
"""Whether or not this name is a mechanism name
(:requires-ext:`rfc6680`)
"""
return self._inquire(mech_name=True).is_mech_name
@property
def mech(self):
"""The mechanism associated with this name (:requires-ext:`rfc6680`)
"""
return self._inquire(mech_name=True).mech
@property
def attributes(self):
"""The attributes of this name (:requires-ext:`rfc6680`)
The attributes are presenting in the form of a
:class:`~collections.MutableMapping` (a dict-like object).
Retrieved values will always be in the form of :class:`frozensets`.
When assigning values, if iterables are used, they be considered to be
the set of values for the given attribute. If a non-iterable is used,
it will be considered a single value, and automatically wrapped in an
iterable.
Note:
String types (includes :class:`bytes`) are not considered to
be iterables in this case.
"""
if self._attr_obj is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return self._attr_obj
|
pythongssapi/python-gssapi
|
gssapi/names.py
|
Name.export
|
python
|
def export(self, composite=False):
    """Export this name as a token.

    The resulting byte string can be re-imported via the `token`
    argument of the constructor.

    Args:
        composite (bool): whether or not to use a composite token --
            :requires-ext:`rfc6680`

    Returns:
        bytes: the exported name in token form

    Raises:
        MechanismNameRequiredError
        BadNameTypeError
        BadNameError
    """
    # Plain (non-composite) export needs no optional extension.
    if not composite:
        return rname.export_name(self)
    if rname_rfc6680 is None:
        raise NotImplementedError("Your GSSAPI implementation does "
                                  "not support RFC 6680 (the GSSAPI "
                                  "naming extensions)")
    return rname_rfc6680.export_name_composite(self)
|
Export this name as a token.
This method exports the name into a byte string which can then be
imported by using the `token` argument of the constructor.
Args:
composite (bool): whether or not use to a composite token --
:requires-ext:`rfc6680`
Returns:
bytes: the exported name in token form
Raises:
MechanismNameRequiredError
BadNameTypeError
BadNameError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/names.py#L188-L215
| null |
class Name(rname.Name):
"""A GSSAPI Name
This class represents a GSSAPI name which may be used with and/or returned
by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.names.Name`
class, and thus may used with both low-level and high-level API methods.
This class may be pickled and unpickled, as well as copied.
The :func:`str` and :func:`bytes` methods may be used to retrieve the
text of the name.
Note:
Name strings will be automatically converted to and from unicode
strings as appropriate. If a method is listed as returning a
:class:`str` object, it will return a unicode string.
The encoding used will be python-gssapi's current encoding, which
defaults to UTF-8.
"""
__slots__ = ('_attr_obj')
def __new__(cls, base=None, name_type=None, token=None,
composite=False):
if token is not None:
if composite:
if rname_rfc6680 is None:
raise NotImplementedError(
"Your GSSAPI implementation does not support RFC 6680 "
"(the GSSAPI naming extensions)")
if rname_rfc6680_comp_oid is not None:
base_name = rname.import_name(token,
NameType.composite_export)
displ_name = rname.display_name(base_name, name_type=True)
if displ_name.name_type == NameType.composite_export:
# NB(directxman12): there's a bug in MIT krb5 <= 1.13
# where GSS_C_NT_COMPOSITE_EXPORT doesn't trigger
# immediate import logic. However, we can just use
# the normal GSS_C_NT_EXPORT_NAME in this case.
base_name = rname.import_name(token, NameType.export)
else:
# NB(directxman12): some older versions of MIT krb5 don't
# have support for the GSS_C_NT_COMPOSITE_EXPORT, but do
# support composite tokens via GSS_C_NT_EXPORT_NAME.
base_name = rname.import_name(token, NameType.export)
else:
base_name = rname.import_name(token, NameType.export)
elif isinstance(base, rname.Name):
base_name = base
else:
if isinstance(base, six.text_type):
base = base.encode(_utils._get_encoding())
base_name = rname.import_name(base, name_type)
return super(Name, cls).__new__(cls, base_name)
def __init__(self, base=None, name_type=None, token=None, composite=False):
"""
The constructor can be used to "import" a name from a human readable
representation, or from a token, and can also be used to convert a
low-level :class:`gssapi.raw.names.Name` object into a high-level
object.
If a :class:`~gssapi.raw.names.Name` object from the low-level API
is passed as the `base` argument, it will be converted into a
high-level object.
If the `token` argument is used, the name will be imported using
the token. If the token was exported as a composite token,
pass `composite=True`.
Otherwise, a new name will be created, using the `base` argument as
the human-readable string and the `name_type` argument to denote the
name type.
Raises:
BadNameTypeError
BadNameError
BadMechanismError
"""
if rname_rfc6680 is not None:
self._attr_obj = _NameAttributeMapping(self)
else:
self._attr_obj = None
def __str__(self):
if issubclass(str, six.text_type):
# Python 3 -- we should return unicode
return bytes(self).decode(_utils._get_encoding())
else:
# Python 2 -- we should return a string
return self.__bytes__()
def __unicode__(self):
# Python 2 -- someone asked for unicode
return self.__bytes__().decode(_utils._get_encoding())
def __bytes__(self):
# Python 3 -- someone asked for bytes
return rname.display_name(self, name_type=False).name
def display_as(self, name_type):
"""
Display this name as the given name type.
This method attempts to display the current :class:`Name`
using the syntax of the given :class:`NameType`, if possible.
Warning:
In MIT krb5 versions below 1.13.3, this method can segfault if
the name was not *originally* created with a `name_type` that was
not ``None`` (even in cases when a ``name_type``
is later "added", such as via :meth:`canonicalize`).
**Do not use this method unless you are sure the above
conditions can never happen in your code.**
Warning:
In addition to the above warning, current versions of MIT krb5 do
not actually fully implement this method, and it may return
incorrect results in the case of canonicalized names.
:requires-ext:`rfc6680`
Args:
name_type (OID): the :class:`NameType` to use to display the given
name
Returns:
str: the displayed name
Raises:
OperationUnavailableError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return rname_rfc6680.display_name_ext(self, name_type).decode(
_utils._get_encoding())
@property
def name_type(self):
"""The :class:`NameType` of this name"""
return rname.display_name(self, name_type=True).name_type
def __eq__(self, other):
if not isinstance(other, rname.Name):
# maybe something else can compare this
# to other classes, but we certainly can't
return NotImplemented
else:
return rname.compare_name(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
disp_res = rname.display_name(self, name_type=True)
return "Name({name}, {name_type})".format(name=disp_res.name,
name_type=disp_res.name_type)
def canonicalize(self, mech):
"""Canonicalize a name with respect to a mechanism.
This method returns a new :class:`Name` that is canonicalized according
to the given mechanism.
Args:
mech (OID): the :class:`MechType` to use
Returns:
Name: the canonicalized name
Raises:
BadMechanismError
BadNameTypeError
BadNameError
"""
return type(self)(rname.canonicalize_name(self, mech))
def __copy__(self):
return type(self)(rname.duplicate_name(self))
def __deepcopy__(self, memo):
return type(self)(rname.duplicate_name(self))
def _inquire(self, **kwargs):
"""Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
if not kwargs:
default_val = True
else:
default_val = False
attrs = kwargs.get('attrs', default_val)
mech_name = kwargs.get('mech_name', default_val)
return rname_rfc6680.inquire_name(self, mech_name=mech_name,
attrs=attrs)
@property
def is_mech_name(self):
"""Whether or not this name is a mechanism name
(:requires-ext:`rfc6680`)
"""
return self._inquire(mech_name=True).is_mech_name
@property
def mech(self):
"""The mechanism associated with this name (:requires-ext:`rfc6680`)
"""
return self._inquire(mech_name=True).mech
@property
def attributes(self):
"""The attributes of this name (:requires-ext:`rfc6680`)
The attributes are presenting in the form of a
:class:`~collections.MutableMapping` (a dict-like object).
Retrieved values will always be in the form of :class:`frozensets`.
When assigning values, if iterables are used, they be considered to be
the set of values for the given attribute. If a non-iterable is used,
it will be considered a single value, and automatically wrapped in an
iterable.
Note:
String types (includes :class:`bytes`) are not considered to
be iterables in this case.
"""
if self._attr_obj is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return self._attr_obj
|
pythongssapi/python-gssapi
|
gssapi/names.py
|
Name._inquire
|
python
|
def _inquire(self, **kwargs):
    """Inspect this name for information.

    With no keyword arguments, all available information is queried;
    otherwise only the keyword arguments passed as True are queried.

    Args:
        mech_name (bool): get whether this is a mechanism name,
            and, if so, the associated mechanism
        attrs (bool): get the attribute names for this name

    Returns:
        InquireNameResult: the results of the inquiry, with unused
            fields set to None

    Raises:
        GSSError
    """
    if rname_rfc6680 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "support RFC 6680 (the GSSAPI naming "
                                  "extensions)")
    # When nothing was requested explicitly, query everything.
    fallback = not kwargs
    want_attrs = kwargs.get('attrs', fallback)
    want_mech_name = kwargs.get('mech_name', fallback)
    return rname_rfc6680.inquire_name(self, mech_name=want_mech_name,
                                      attrs=want_attrs)
|
Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/names.py#L243-L279
| null |
class Name(rname.Name):
"""A GSSAPI Name
This class represents a GSSAPI name which may be used with and/or returned
by other GSSAPI methods.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.names.Name`
class, and thus may used with both low-level and high-level API methods.
This class may be pickled and unpickled, as well as copied.
The :func:`str` and :func:`bytes` methods may be used to retrieve the
text of the name.
Note:
Name strings will be automatically converted to and from unicode
strings as appropriate. If a method is listed as returning a
:class:`str` object, it will return a unicode string.
The encoding used will be python-gssapi's current encoding, which
defaults to UTF-8.
"""
__slots__ = ('_attr_obj')
def __new__(cls, base=None, name_type=None, token=None,
composite=False):
if token is not None:
if composite:
if rname_rfc6680 is None:
raise NotImplementedError(
"Your GSSAPI implementation does not support RFC 6680 "
"(the GSSAPI naming extensions)")
if rname_rfc6680_comp_oid is not None:
base_name = rname.import_name(token,
NameType.composite_export)
displ_name = rname.display_name(base_name, name_type=True)
if displ_name.name_type == NameType.composite_export:
# NB(directxman12): there's a bug in MIT krb5 <= 1.13
# where GSS_C_NT_COMPOSITE_EXPORT doesn't trigger
# immediate import logic. However, we can just use
# the normal GSS_C_NT_EXPORT_NAME in this case.
base_name = rname.import_name(token, NameType.export)
else:
# NB(directxman12): some older versions of MIT krb5 don't
# have support for the GSS_C_NT_COMPOSITE_EXPORT, but do
# support composite tokens via GSS_C_NT_EXPORT_NAME.
base_name = rname.import_name(token, NameType.export)
else:
base_name = rname.import_name(token, NameType.export)
elif isinstance(base, rname.Name):
base_name = base
else:
if isinstance(base, six.text_type):
base = base.encode(_utils._get_encoding())
base_name = rname.import_name(base, name_type)
return super(Name, cls).__new__(cls, base_name)
def __init__(self, base=None, name_type=None, token=None, composite=False):
"""
The constructor can be used to "import" a name from a human readable
representation, or from a token, and can also be used to convert a
low-level :class:`gssapi.raw.names.Name` object into a high-level
object.
If a :class:`~gssapi.raw.names.Name` object from the low-level API
is passed as the `base` argument, it will be converted into a
high-level object.
If the `token` argument is used, the name will be imported using
the token. If the token was exported as a composite token,
pass `composite=True`.
Otherwise, a new name will be created, using the `base` argument as
the human-readable string and the `name_type` argument to denote the
name type.
Raises:
BadNameTypeError
BadNameError
BadMechanismError
"""
if rname_rfc6680 is not None:
self._attr_obj = _NameAttributeMapping(self)
else:
self._attr_obj = None
def __str__(self):
if issubclass(str, six.text_type):
# Python 3 -- we should return unicode
return bytes(self).decode(_utils._get_encoding())
else:
# Python 2 -- we should return a string
return self.__bytes__()
def __unicode__(self):
# Python 2 -- someone asked for unicode
return self.__bytes__().decode(_utils._get_encoding())
def __bytes__(self):
# Python 3 -- someone asked for bytes
return rname.display_name(self, name_type=False).name
def display_as(self, name_type):
"""
Display this name as the given name type.
This method attempts to display the current :class:`Name`
using the syntax of the given :class:`NameType`, if possible.
Warning:
In MIT krb5 versions below 1.13.3, this method can segfault if
the name was not *originally* created with a `name_type` that was
not ``None`` (even in cases when a ``name_type``
is later "added", such as via :meth:`canonicalize`).
**Do not use this method unless you are sure the above
conditions can never happen in your code.**
Warning:
In addition to the above warning, current versions of MIT krb5 do
not actually fully implement this method, and it may return
incorrect results in the case of canonicalized names.
:requires-ext:`rfc6680`
Args:
name_type (OID): the :class:`NameType` to use to display the given
name
Returns:
str: the displayed name
Raises:
OperationUnavailableError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return rname_rfc6680.display_name_ext(self, name_type).decode(
_utils._get_encoding())
@property
def name_type(self):
"""The :class:`NameType` of this name"""
return rname.display_name(self, name_type=True).name_type
def __eq__(self, other):
if not isinstance(other, rname.Name):
# maybe something else can compare this
# to other classes, but we certainly can't
return NotImplemented
else:
return rname.compare_name(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
disp_res = rname.display_name(self, name_type=True)
return "Name({name}, {name_type})".format(name=disp_res.name,
name_type=disp_res.name_type)
def export(self, composite=False):
"""Export this name as a token.
This method exports the name into a byte string which can then be
imported by using the `token` argument of the constructor.
Args:
composite (bool): whether or not use to a composite token --
:requires-ext:`rfc6680`
Returns:
bytes: the exported name in token form
Raises:
MechanismNameRequiredError
BadNameTypeError
BadNameError
"""
if composite:
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not support RFC 6680 (the GSSAPI "
"naming extensions)")
return rname_rfc6680.export_name_composite(self)
else:
return rname.export_name(self)
def canonicalize(self, mech):
"""Canonicalize a name with respect to a mechanism.
This method returns a new :class:`Name` that is canonicalized according
to the given mechanism.
Args:
mech (OID): the :class:`MechType` to use
Returns:
Name: the canonicalized name
Raises:
BadMechanismError
BadNameTypeError
BadNameError
"""
return type(self)(rname.canonicalize_name(self, mech))
def __copy__(self):
return type(self)(rname.duplicate_name(self))
def __deepcopy__(self, memo):
return type(self)(rname.duplicate_name(self))
@property
def is_mech_name(self):
"""Whether or not this name is a mechanism name
(:requires-ext:`rfc6680`)
"""
return self._inquire(mech_name=True).is_mech_name
@property
def mech(self):
"""The mechanism associated with this name (:requires-ext:`rfc6680`)
"""
return self._inquire(mech_name=True).mech
@property
def attributes(self):
"""The attributes of this name (:requires-ext:`rfc6680`)
The attributes are presenting in the form of a
:class:`~collections.MutableMapping` (a dict-like object).
Retrieved values will always be in the form of :class:`frozensets`.
When assigning values, if iterables are used, they be considered to be
the set of values for the given attribute. If a non-iterable is used,
it will be considered a single value, and automatically wrapped in an
iterable.
Note:
String types (includes :class:`bytes`) are not considered to
be iterables in this case.
"""
if self._attr_obj is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return self._attr_obj
|
pythongssapi/python-gssapi
|
gssapi/mechs.py
|
Mechanism.from_sasl_name
|
python
|
def from_sasl_name(cls, name=None):
    """Create a Mechanism from its SASL name.

    Args:
        name (str): SASL name of the desired mechanism

    Returns:
        Mechanism: the desired mechanism

    Raises:
        GSSError

    :requires-ext:`rfc5801`
    """
    if rfc5801 is None:
        raise NotImplementedError("Your GSSAPI implementation does not "
                                  "have support for RFC 5801")
    # The low-level call expects bytes; encode text input first.
    if isinstance(name, six.text_type):
        name = name.encode(_utils._get_encoding())
    mech_oid = rfc5801.inquire_mech_for_saslname(name)
    return cls(mech_oid)
|
Create a Mechanism from its SASL name
Args:
name (str): SASL name of the desired mechanism
Returns:
Mechanism: the desired mechanism
Raises:
GSSError
:requires-ext:`rfc5801`
|
train
|
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/mechs.py#L141-L164
| null |
class Mechanism(roids.OID):
"""
A GSSAPI Mechanism
This class represents a mechanism and centralizes functions dealing with
mechanisms and can be used with any calls.
It inherits from the low-level GSSAPI :class:`~gssapi.raw.oids.OID` class,
and thus can be used with both low-level and high-level API calls.
"""
def __new__(cls, cpy=None, elements=None):
return super(Mechanism, cls).__new__(cls, cpy, elements)
@property
def name_types(self):
"""
Get the set of name types supported by this mechanism.
"""
return rmisc.inquire_names_for_mech(self)
@property
def _saslname(self):
if rfc5801 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5801")
return rfc5801.inquire_saslname_for_mech(self)
@property
def _attrs(self):
if rfc5587 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5587")
return rfc5587.inquire_attrs_for_mech(self)
def __str__(self):
if issubclass(str, six.text_type):
# Python 3 -- we should return unicode
return self._bytes_desc().decode(_utils._get_encoding())
else:
return self._bytes_desc()
def __unicode__(self):
return self._bytes_desc().decode(_utils._get_encoding())
def _bytes_desc(self):
base = self.dotted_form
if rfc5801 is not None and self._saslname and self._saslname.mech_name:
base = self._saslname.mech_name
if isinstance(base, six.text_type):
base = base.encode(_utils._get_encoding())
return base
def __repr__(self):
"""
Get a name representing the mechanism; always safe to call
"""
base = "<Mechanism (%s)>" % self.dotted_form
if rfc5801 is not None:
base = "<Mechanism %s (%s)>" % (
self._saslname.mech_name.decode('UTF-8'),
self.dotted_form
)
return base
@property
def sasl_name(self):
"""
Get the SASL name for the mechanism
:requires-ext:`rfc5801`
"""
return self._saslname.sasl_mech_name.decode('UTF-8')
@property
def description(self):
"""
Get the description of the mechanism
:requires-ext:`rfc5801`
"""
return self._saslname.mech_description.decode('UTF-8')
@property
def known_attrs(self):
"""
Get the known attributes of the mechanism; returns a set of OIDs
([OID])
:requires-ext:`rfc5587`
"""
return self._attrs.known_mech_attrs
@property
def attrs(self):
"""
Get the attributes of the mechanism; returns a set of OIDs ([OID])
:requires-ext:`rfc5587`
"""
return self._attrs.mech_attrs
@classmethod
def all_mechs(cls):
"""
Get a generator of all mechanisms supported by GSSAPI
"""
return (cls(mech) for mech in rmisc.indicate_mechs())
@classmethod
def from_name(cls, name=None):
"""
Get a generator of mechanisms that may be able to process the name
Args:
name (Name): a name to inquire about
Returns:
[Mechanism]: a set of mechanisms which support this name
Raises:
GSSError
"""
return (cls(mech) for mech in rmisc.inquire_mechs_for_name(name))
@classmethod
def from_attrs(cls, desired_attrs=None, except_attrs=None,
critical_attrs=None):
"""
Get a generator of mechanisms supporting the specified attributes. See
RFC 5587's :func:`indicate_mechs_by_attrs` for more information.
Args:
desired_attrs ([OID]): Desired attributes
except_attrs ([OID]): Except attributes
critical_attrs ([OID]): Critical attributes
Returns:
[Mechanism]: A set of mechanisms having the desired features.
Raises:
GSSError
:requires-ext:`rfc5587`
"""
if isinstance(desired_attrs, roids.OID):
desired_attrs = set([desired_attrs])
if isinstance(except_attrs, roids.OID):
except_attrs = set([except_attrs])
if isinstance(critical_attrs, roids.OID):
critical_attrs = set([critical_attrs])
if rfc5587 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5587")
mechs = rfc5587.indicate_mechs_by_attrs(desired_attrs,
except_attrs,
critical_attrs)
return (cls(mech) for mech in mechs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.