repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.send_packet | python | def send_packet(self, packet, queue=True):
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue)) | Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L273-L286 | null | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.handle_packet | python | def handle_packet(self, packet):
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet) | Lets librtmp look at a packet and send a response
if needed. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L288-L295 | null | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.process_packets | python | def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args | Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets() | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L297-L377 | [
"def decode_amf(body):\n obj = ffi.new(\"AMFObject*\")\n res = librtmp.AMF_Decode(obj, body, len(body), 0)\n\n if res == ffi.NULL:\n raise AMFError(\"Unable to decode AMF data\")\n\n rval = []\n prop_count = librtmp.AMF_CountProp(obj)\n\n for i in range(prop_count):\n prop = librtmp.AMF_GetProp(obj, ffi.NULL, i)\n val = _decode_prop(prop)\n rval.append(val)\n\n return rval\n",
"def read_packet(self):\n \"\"\"Reads a RTMP packet from the server.\n\n Returns a :class:`RTMPPacket`.\n\n Raises :exc:`RTMPError` on error.\n Raises :exc:`RTMPTimeoutError` on timeout.\n\n Usage::\n\n >>> packet = conn.read_packet()\n >>> packet.body\n b'packet body ...'\n \"\"\"\n\n packet = ffi.new(\"RTMPPacket*\")\n packet_complete = False\n\n while not packet_complete:\n res = librtmp.RTMP_ReadPacket(self.rtmp, packet)\n\n if res < 1:\n if librtmp.RTMP_IsTimedout(self.rtmp):\n raise RTMPTimeoutError(\"Timed out while reading packet\")\n else:\n raise RTMPError(\"Failed to read packet\")\n\n packet_complete = packet.m_nBytesRead == packet.m_nBodySize\n\n return RTMPPacket._from_pointer(packet)\n",
"def handle_packet(self, packet):\n \"\"\"Lets librtmp look at a packet and send a response\n if needed.\"\"\"\n\n if not isinstance(packet, RTMPPacket):\n raise ValueError(\"A RTMPPacket argument is required\")\n\n return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)\n",
"def call(self, method, *args, **params):\n \"\"\"Calls a method on the server.\"\"\"\n\n transaction_id = params.get(\"transaction_id\")\n\n if not transaction_id:\n self.transaction_id += 1\n transaction_id = self.transaction_id\n\n obj = params.get(\"obj\")\n\n args = [method, transaction_id, obj] + list(args)\n args_encoded = map(lambda x: encode_amf(x), args)\n body = b\"\".join(args_encoded)\n\n format = params.get(\"format\", PACKET_SIZE_MEDIUM)\n channel = params.get(\"channel\", 0x03)\n\n packet = RTMPPacket(type=PACKET_TYPE_INVOKE,\n format=format, channel=channel,\n body=body)\n\n self.send_packet(packet)\n\n return RTMPCall(self, transaction_id)\n"
] | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.call | python | def call(self, method, *args, **params):
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id) | Calls a method on the server. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L379-L403 | [
"def send_packet(self, packet, queue=True):\n \"\"\"Sends a RTMP packet to the server.\n\n :param packet: RTMPPacket, the packet to send to the server.\n :param queue: bool, If True, queue up the packet in a internal queue rather\n than sending it right away.\n\n \"\"\"\n\n if not isinstance(packet, RTMPPacket):\n raise ValueError(\"A RTMPPacket argument is required\")\n\n return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,\n int(queue))\n"
] | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def remote_method(self, method, block=False, **params):
"""Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted'
"""
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMP.remote_method | python | def remote_method(self, method, block=False, **params):
def func(*args):
call = self.call(method, *args, **params)
if block:
return call.result()
return call
func.__name__ = method
return func | Creates a Python function that will attempt to
call a remote method when used.
:param method: str, Method name on the server to call
:param block: bool, Wheter to wait for result or not
Usage::
>>> send_usher_token = conn.remote_method("NetStream.Authenticate.UsherToken", block=True)
>>> send_usher_token("some token")
'Token Accepted' | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L405-L429 | null | class RTMP(object):
""" A RTMP client session.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`.
:param playpath: str, Overrides the playpath parsed from the RTMP URL.
:param tcurl: str, URL of the target stream. Defaults to `rtmp[t][e|s]://host[:port]/app`.
:param app: str, Name of application to connect to on the RTMP server.
:param pageurl: str, URL of the web page in which the media was embedded.
:param auth: str, Authentication string to be appended to the connect string.
:param connect_data: This value will be encoded to AMF and added to the connect packet.
:param swfhash: str, SHA256 hash of the decompressed SWF file (hexdigest).
:param swfsize: int, Size of the decompressed SWF file.
:param swfurl: str, URL of the SWF player for the media.
:param swfvfy: bool, Calculate the correct swfhash and swfsize parameter
from the `swfurl` specified.
:param flashver: str, Version of the Flash plugin used to run the SWF player.
:param subscribe: str, Name of live stream to subscribe to. Defaults to `playpath`.
:param token: str, Key for SecureToken response, used if the server requires
SecureToken authentication.
:param live: bool, Specify that the media is a live stream.
:param jtv: str, JSON token used by Twitch/Justin.tv servers.
:param socks: str, Use the specified SOCKS4 proxy.
:param start: int, Start at num seconds into the stream. Not valid for live streams.
:param stop: int, Stop at num seconds into the stream.
:param buffer: int, Set buffer time to num milliseconds. This is used to control
rate of data sent by FMS servers, not buffering of data. The default is 30000.
:param timeout: int, Timeout the session after num seconds without receiving any data
from the server. The default is 30.
"""
def __init__(self, url, playpath=None, tcurl=None, app=None, pageurl=None,
auth=None, swfhash=None, swfsize=None, swfurl=None, swfvfy=None,
flashver=None, subscribe=None, token=None, live=None, jtv=None,
connect_data=None, socks=None, start=None, stop=None, buffer=None,
timeout=None):
def set_opt(key, val):
if val is not None:
self.set_option(key, val)
self.rtmp = librtmp.RTMP_Alloc()
if self.rtmp == ffi.NULL:
raise MemoryError("Failed to allocate RTMP handle")
librtmp.RTMP_Init(self.rtmp)
self._options = dict()
self._invoke_args = dict()
self._invoke_handlers = dict()
self._invoke_results = dict()
self._connect_result = None
self.url = None
if swfurl and swfvfy:
swfhash, swfsize = hash_swf(swfurl)
if swfhash:
digest = unhexlify(swfhash)
librtmp.RTMP_SetSWFHash(self.rtmp, digest, swfsize)
# Socks option must be set before setup_url.
set_opt("socks", socks)
self.setup_url(url)
set_opt("playpath", playpath)
set_opt("tcUrl", tcurl)
set_opt("app", app)
set_opt("swfUrl", swfurl)
set_opt("pageUrl", pageurl)
set_opt("auth", auth)
set_opt("flashver", flashver)
set_opt("subscribe", subscribe)
set_opt("token", token)
set_opt("jtv", jtv)
set_opt("live", live)
set_opt("start", start)
set_opt("stop", stop)
set_opt("buffer", buffer)
set_opt("timeout", timeout)
if isinstance(connect_data, (list, tuple)):
for data in connect_data:
self._parse_connect_data(data)
elif connect_data is not None:
self._parse_connect_data(connect_data)
def _parse_connect_data(self, val):
if isinstance(val, bool):
self.set_option("conn", "B:{0}".format(int(val)))
elif isinstance(val, string_types):
self.set_option("conn", "S:{0}".format(val))
elif isinstance(val, integer_types):
self.set_option("conn", "N:{0}".format(val))
elif isinstance(val, type(None)):
self.set_option("conn", "Z:")
elif isinstance(val, dict):
self.set_option("conn", "O:1")
for key, value in val.items():
if isinstance(value, bool):
self.set_option("conn", "NB:{0}:{1}".format(key, int(value)))
elif isinstance(value, string_types):
self.set_option("conn", "NS:{0}:{1}".format(key, value))
elif isinstance(value, integer_types):
self.set_option("conn", "NN:{0}:{1}".format(key, value))
self.set_option("conn", "O:0")
def set_option(self, key, value):
"""Sets a option for this session.
For a detailed list of available options see the librtmp(3) man page.
:param key: str, A valid option key.
:param value: A value, anything that can be converted to str is valid.
Raises :exc:`ValueError` if a invalid option is specified.
"""
akey = AVal(key)
aval = AVal(value)
res = librtmp.RTMP_SetOpt(self.rtmp, akey.aval, aval.aval)
if res < 1:
raise ValueError("Unable to set option {0}".format(key))
self._options[akey] = aval
def setup_url(self, url):
r"""Attempt to parse a RTMP URL.
Additional options may be specified by appending space-separated
key=value pairs to the URL. Special characters in values may need
to be escaped to prevent misinterpretation by the option parser.
The escape encoding uses a backslash followed by two hexadecimal
digits representing the ASCII value of the character. E.g., spaces
must be escaped as `\\20` and backslashes must be escaped as `\\5c`.
:param url: str, A RTMP URL in the format `rtmp[t][e|s]://hostname[:port][/app[/playpath]]`
Raises :exc:`RTMPError` if URL parsing fails.
"""
self.url = bytes(url, "utf8")
res = librtmp.RTMP_SetupURL(self.rtmp, self.url)
if res < 1:
raise RTMPError("Unable to parse URL")
def connect(self, packet=None):
"""Connect to the server.
:param packet: RTMPPacket, this packet will be sent instead
of the regular "connect" packet.
Raises :exc:`RTMPError` if the connect attempt fails.
"""
if isinstance(packet, RTMPPacket):
packet = packet.packet
else:
packet = ffi.NULL
res = librtmp.RTMP_Connect(self.rtmp, packet)
if res < 1:
raise RTMPError("Failed to connect")
return RTMPCall(self, 1.0)
def create_stream(self, seek=None, writeable=False, update_buffer=True):
"""Prepares the session for streaming of audio/video
and returns a :class:`RTMPStream` object.
:param seek: int, Attempt to seek to this position.
:param writeable: bool, Make the stream writeable instead of readable.
:param update_buffer: bool, When enabled will attempt to speed up
download by telling the server our buffer can
fit the whole stream.
Raises :exc:`RTMPError` if a stream could not be created.
Usage::
>>> stream = conn.create_stream()
>>> data = stream.read(1024)
"""
if writeable:
librtmp.RTMP_EnableWrite(self.rtmp)
# Calling handle_packet() on a connect result causes
# librtmp to send a CreateStream call. This is not always
# desired when using process_packets(), therefore we do it
# here instead.
if self._connect_result:
self.handle_packet(self._connect_result)
if not seek:
seek = 0
res = librtmp.RTMP_ConnectStream(self.rtmp, seek)
if res < 1:
raise RTMPError("Failed to start RTMP playback")
return RTMPStream(self, update_buffer=update_buffer)
@property
def connected(self):
"""Returns True if connected to the server.
Usage::
>>> conn.connected
True
"""
return bool(librtmp.RTMP_IsConnected(self.rtmp))
def read_packet(self):
"""Reads a RTMP packet from the server.
Returns a :class:`RTMPPacket`.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> packet = conn.read_packet()
>>> packet.body
b'packet body ...'
"""
packet = ffi.new("RTMPPacket*")
packet_complete = False
while not packet_complete:
res = librtmp.RTMP_ReadPacket(self.rtmp, packet)
if res < 1:
if librtmp.RTMP_IsTimedout(self.rtmp):
raise RTMPTimeoutError("Timed out while reading packet")
else:
raise RTMPError("Failed to read packet")
packet_complete = packet.m_nBytesRead == packet.m_nBodySize
return RTMPPacket._from_pointer(packet)
def send_packet(self, packet, queue=True):
"""Sends a RTMP packet to the server.
:param packet: RTMPPacket, the packet to send to the server.
:param queue: bool, If True, queue up the packet in a internal queue rather
than sending it right away.
"""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_SendPacket(self.rtmp, packet.packet,
int(queue))
def handle_packet(self, packet):
"""Lets librtmp look at a packet and send a response
if needed."""
if not isinstance(packet, RTMPPacket):
raise ValueError("A RTMPPacket argument is required")
return librtmp.RTMP_ClientPacket(self.rtmp, packet.packet)
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
"""Wait for packets and process them as needed.
:param transaction_id: int, Wait until the result of this
transaction ID is recieved.
:param invoked_method: int, Wait until this method is invoked
by the server.
:param timeout: int, The time to wait for a result from the server.
Note: This is the timeout used by this method only,
the connection timeout is still used when reading
packets.
Raises :exc:`RTMPError` on error.
Raises :exc:`RTMPTimeoutError` on timeout.
Usage::
>>> @conn.invoke_handler
... def add(x, y):
... return x + y
>>> @conn.process_packets()
"""
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except AMFError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id)
def invoke_handler(self, func=None, name=None):
if not callable(func):
return lambda f: self.invoke_handler(func=f, name=func)
method = name or func.__name__
self.register_invoke_handler(method, func)
return func
def register_invoke_handler(self, method, func):
self._invoke_handlers[method] = func
def close(self):
"""Closes the connection to the server."""
if self.connected:
librtmp.RTMP_Close(self.rtmp)
@property
def transaction_id(self):
return librtmp.RTMP_GetInvokeCount(self.rtmp)
@transaction_id.setter
def transaction_id(self, val):
librtmp.RTMP_SetInvokeCount(self.rtmp, int(val))
def __del__(self):
librtmp.RTMP_Free(self.rtmp)
|
chrippa/python-librtmp | librtmp/rtmp.py | RTMPCall.result | python | def result(self, timeout=None):
if self.done:
return self._result
result = self.conn.process_packets(transaction_id=self.transaction_id,
timeout=timeout)
self._result = result
self.done = True
return result | Retrieves the result of the call.
:param timeout: The time to wait for a result from the server.
Raises :exc:`RTMPTimeoutError` on timeout. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/rtmp.py#L473-L489 | null | class RTMPCall(object):
"""A RTMP call.
Contains the result of a :meth:`RTMP.call`.
"""
def __init__(self, conn, transaction_id):
self._result = None
self.conn = conn
self.done = False
self.transaction_id = transaction_id
|
chrippa/python-librtmp | librtmp/utils.py | add_signal_handler | python | def add_signal_handler():
import signal
def handler(sig, frame):
if sig == signal.SIGINT:
librtmp.RTMP_UserInterrupt()
raise KeyboardInterrupt
signal.signal(signal.SIGINT, handler) | Adds a signal handler to handle KeyboardInterrupt. | train | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/utils.py#L12-L21 | null | from binascii import hexlify
from collections import namedtuple
from . import ffi, librtmp
from .aval import AVal
from .compat import bytes, str
from .exceptions import RTMPError
__all__ = ["add_signal_handler", "hash_swf"]
def hash_swf(url, age=30):
hash = ffi.new("unsigned char[]", 32)
size = ffi.new("unsigned int*")
url = bytes(url, "utf8")
res = librtmp.RTMP_HashSWF(url, size, hash, age)
if res == 0:
hash = hexlify(ffi.buffer(hash, 32)[:])
size = size[0]
return str(hash, "utf8"), size
else:
raise RTMPError("Failed to hash SWF")
RTMPURL = namedtuple("RTMPURL", ["protocol", "hostname",
"port", "playpath", "app"])
def parse_url(url):
protocol = ffi.new("int*")
hostname = AVal("")
port = ffi.new("unsigned int*")
playpath = AVal("")
app = AVal("")
res = librtmp.RTMP_ParseURL(bytes(url, "utf8"), protocol, hostname.aval, port,
playpath.aval, app.aval)
if res < 1:
result = RTMPURL(0, "", 0, "", "")
else:
result = RTMPURL(protocol[0], str(hostname.value, "utf8"), port[0],
str(playpath.value, "utf8"), str(app.value, "utf8"))
return result
|
ksbg/sparklanes | sparklanes/_framework/lane.py | build_lane_from_yaml | python | def build_lane_from_yaml(path):
# Open
with open(path, 'rb') as yaml_definition:
definition = yaml.load(yaml_definition)
# Validate schema
try:
validate_schema(definition)
except SchemaError as exc:
raise LaneSchemaError(**exc.__dict__)
def build(lb_def, branch=False):
"""Function to recursively build the `sparklanes.Lane` object from a YAML definition"""
init_kwargs = {k: lb_def[k] for k in (a for a in ('run_parallel', 'name') if a in lb_def)}
lane_or_branch = Lane(**init_kwargs) if not branch else Branch(**init_kwargs)
for task in lb_def['tasks']:
if 'branch' in task:
branch_def = task['branch']
lane_or_branch.add(build(branch_def, True))
else:
sep = task['class'].rfind('.')
if sep == -1:
raise LaneImportError('Class must include its parent module')
mdl = task['class'][:sep]
cls_ = task['class'][sep + 1:]
try:
cls = getattr(import_module(mdl), cls_)
except ImportError:
raise LaneImportError('Could not find module %s' % mdl)
except AttributeError:
raise LaneImportError('Could not find class %s' % cls_)
args = task['args'] if 'args' in task else []
args = [args] if not isinstance(args, list) else args
kwargs = task['kwargs'] if 'kwargs' in task else {}
lane_or_branch.add(cls, *args, **kwargs)
return lane_or_branch
return build(definition['lane']) | Builds a `sparklanes.Lane` object from a YAML definition file.
Parameters
----------
path: str
Path to the YAML definition file
Returns
-------
Lane
Lane, built according to definition in YAML file | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L165-L218 | [
"def validate_schema(yaml_def, branch=False):\n \"\"\"Validates the schema of a dict\n\n Parameters\n ----------\n yaml_def : dict\n dict whose schema shall be validated\n branch : bool\n Indicates whether `yaml_def` is a dict of a top-level lane, or of a branch\n inside a lane (needed for recursion)\n\n Returns\n -------\n bool\n True if validation was successful\n \"\"\"\n schema = Schema({\n 'lane' if not branch else 'branch': {\n Optional('name'): str,\n Optional('run_parallel'): bool,\n 'tasks': list\n }\n })\n\n schema.validate(yaml_def)\n from schema import And, Use\n task_schema = Schema({\n 'class': str,\n Optional('kwargs'): Or({str: object}),\n Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))\n })\n\n def validate_tasks(tasks): # pylint: disable=missing-docstring\n for task in tasks:\n try:\n Schema({'branch': dict}).validate(task)\n validate_schema(task, True)\n except SchemaError:\n task_schema.validate(task)\n\n return True\n\n return validate_tasks(yaml_def['lane']['tasks'] if not branch else yaml_def['branch']['tasks'])\n",
"def build(lb_def, branch=False):\n \"\"\"Function to recursively build the `sparklanes.Lane` object from a YAML definition\"\"\"\n init_kwargs = {k: lb_def[k] for k in (a for a in ('run_parallel', 'name') if a in lb_def)}\n lane_or_branch = Lane(**init_kwargs) if not branch else Branch(**init_kwargs)\n\n for task in lb_def['tasks']:\n if 'branch' in task:\n branch_def = task['branch']\n lane_or_branch.add(build(branch_def, True))\n else:\n sep = task['class'].rfind('.')\n if sep == -1:\n raise LaneImportError('Class must include its parent module')\n mdl = task['class'][:sep]\n cls_ = task['class'][sep + 1:]\n\n try:\n cls = getattr(import_module(mdl), cls_)\n except ImportError:\n raise LaneImportError('Could not find module %s' % mdl)\n except AttributeError:\n raise LaneImportError('Could not find class %s' % cls_)\n\n args = task['args'] if 'args' in task else []\n args = [args] if not isinstance(args, list) else args\n kwargs = task['kwargs'] if 'kwargs' in task else {}\n lane_or_branch.add(cls, *args, **kwargs)\n\n return lane_or_branch\n"
] | """Lane and Branch classes. TODO: Better logging"""
from importlib import import_module
from inspect import isclass
import yaml
from schema import SchemaError
from six import string_types
from sparklanes._framework.errors import LaneSchemaError, LaneImportError
from sparklanes._framework.validation import validate_schema
from .env import INTERNAL_LOGGER_NAME
from .errors import LaneExecutionError
from .log import make_default_logger
from .task import LaneTask, LaneTaskThread
from .validation import validate_params
class Lane(object):
"""Used to build and run data processing lanes (i.e. pipelines).
Public methods are chainable."""
def __init__(self, name='UnnamedLane', run_parallel=False):
"""
Parameters
----------
name : str
Custom name of the lane
run_parallel : bool
Indicates, whether the tasks in a Lane shall be executed in parallel.
Does not affect branches inside the lane (`run_parallel` must be indicated in the
branches themselves)
"""
if not isinstance(name, string_types):
raise TypeError('`name` must be a string')
self.name = name
self.run_parallel = run_parallel
self.tasks = []
def __str__(self):
"""Generates a readable string using the tasks/branches inside the lane, i.e. builds a
string the showing the tasks and branches in a lane"""
task_str = '=' * 80 + '\n'
def generate_str(lane_or_branch, prefix='\t', out=''):
"""Recursive string generation"""
out += prefix + lane_or_branch.name
if lane_or_branch.run_parallel:
out += ' (parallel)'
out += '\n'
for task in lane_or_branch.tasks:
if isinstance(task, Branch):
out += generate_str(task, prefix + prefix[0])
elif isinstance(task['cls_or_branch'], Branch):
out += generate_str(task['cls_or_branch'], prefix + prefix[0])
else:
out += prefix + ' >' + task['cls_or_branch'].__name__ + '\n'
return out
task_str += generate_str(self) + '=' * 80
return task_str
def __validate_task(self, cls, entry_mtd_name, args, kwargs):
"""Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if
the supplied args/kwargs match the signature of the task's entry method.
Parameters
----------
cls : LaneTask
entry_mtd_name : str
Name of the method, which is called when the task is run
args : list
kwargs : dict
"""
if not isclass(cls) or not issubclass(cls, LaneTask):
raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was '
'decorated with `sparklanes.Task`?' % str(cls))
validate_params(cls, entry_mtd_name, *args, **kwargs)
def add(self, cls_or_branch, *args, **kwargs):
"""Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining
"""
if isinstance(cls_or_branch, Branch):
self.tasks.append(cls_or_branch) # Add branch with already validated tasks
else:
# Validate
self.__validate_task(cls_or_branch, '__init__', args, kwargs)
# Append
self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
return self
def run(self):
"""Executes the tasks in the lane in the order in which they have been added, unless
`self.run_parallel` is True, then a thread is spawned for each task and executed in
parallel (note that task threads are still spawned in the order in which they were added).
"""
logger = make_default_logger(INTERNAL_LOGGER_NAME)
logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80)
logger.info('\n%s', str(self))
threads = []
if not self.tasks:
raise LaneExecutionError('No tasks to execute!')
for task_def_or_branch in self.tasks:
if isinstance(task_def_or_branch, Branch):
task_def_or_branch.run()
elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch
task_def_or_branch['cls_or_branch'].run()
else:
task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'],
**task_def_or_branch['kwargs'])
if self.run_parallel:
threads.append(LaneTaskThread(task))
else:
task()
if threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80)
return self
class Branch(Lane, object):
"""Branches can be used to split task lanes into branches, which is e.g. useful if part of the
data processing pipeline should be executed in parallel, while other parts should be run in
subsequent order."""
def __init__(self, name='UnnamedBranch', run_parallel=False):
"""
Parameters
----------
name (str): Custom name of the branch
run_parallel (bool): Indicates if the task in the branch shall be executed in parallel
args (List[object])
"""
super(Branch, self).__init__(name=name, run_parallel=run_parallel)
|
ksbg/sparklanes | sparklanes/_framework/lane.py | Lane.__validate_task | python | def __validate_task(self, cls, entry_mtd_name, args, kwargs):
if not isclass(cls) or not issubclass(cls, LaneTask):
raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was '
'decorated with `sparklanes.Task`?' % str(cls))
validate_params(cls, entry_mtd_name, *args, **kwargs) | Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if
the supplied args/kwargs match the signature of the task's entry method.
Parameters
----------
cls : LaneTask
entry_mtd_name : str
Name of the method, which is called when the task is run
args : list
kwargs : dict | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L68-L84 | null | class Lane(object):
"""Used to build and run data processing lanes (i.e. pipelines).
Public methods are chainable."""
def __init__(self, name='UnnamedLane', run_parallel=False):
"""
Parameters
----------
name : str
Custom name of the lane
run_parallel : bool
Indicates, whether the tasks in a Lane shall be executed in parallel.
Does not affect branches inside the lane (`run_parallel` must be indicated in the
branches themselves)
"""
if not isinstance(name, string_types):
raise TypeError('`name` must be a string')
self.name = name
self.run_parallel = run_parallel
self.tasks = []
def __str__(self):
"""Generates a readable string using the tasks/branches inside the lane, i.e. builds a
string the showing the tasks and branches in a lane"""
task_str = '=' * 80 + '\n'
def generate_str(lane_or_branch, prefix='\t', out=''):
"""Recursive string generation"""
out += prefix + lane_or_branch.name
if lane_or_branch.run_parallel:
out += ' (parallel)'
out += '\n'
for task in lane_or_branch.tasks:
if isinstance(task, Branch):
out += generate_str(task, prefix + prefix[0])
elif isinstance(task['cls_or_branch'], Branch):
out += generate_str(task['cls_or_branch'], prefix + prefix[0])
else:
out += prefix + ' >' + task['cls_or_branch'].__name__ + '\n'
return out
task_str += generate_str(self) + '=' * 80
return task_str
def add(self, cls_or_branch, *args, **kwargs):
"""Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining
"""
if isinstance(cls_or_branch, Branch):
self.tasks.append(cls_or_branch) # Add branch with already validated tasks
else:
# Validate
self.__validate_task(cls_or_branch, '__init__', args, kwargs)
# Append
self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
return self
def run(self):
"""Executes the tasks in the lane in the order in which they have been added, unless
`self.run_parallel` is True, then a thread is spawned for each task and executed in
parallel (note that task threads are still spawned in the order in which they were added).
"""
logger = make_default_logger(INTERNAL_LOGGER_NAME)
logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80)
logger.info('\n%s', str(self))
threads = []
if not self.tasks:
raise LaneExecutionError('No tasks to execute!')
for task_def_or_branch in self.tasks:
if isinstance(task_def_or_branch, Branch):
task_def_or_branch.run()
elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch
task_def_or_branch['cls_or_branch'].run()
else:
task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'],
**task_def_or_branch['kwargs'])
if self.run_parallel:
threads.append(LaneTaskThread(task))
else:
task()
if threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80)
return self
|
ksbg/sparklanes | sparklanes/_framework/lane.py | Lane.add | python | def add(self, cls_or_branch, *args, **kwargs):
if isinstance(cls_or_branch, Branch):
self.tasks.append(cls_or_branch) # Add branch with already validated tasks
else:
# Validate
self.__validate_task(cls_or_branch, '__init__', args, kwargs)
# Append
self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
return self | Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L86-L109 | [
"def __validate_task(self, cls, entry_mtd_name, args, kwargs):\n \"\"\"Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if\n the supplied args/kwargs match the signature of the task's entry method.\n\n Parameters\n ----------\n cls : LaneTask\n entry_mtd_name : str\n Name of the method, which is called when the task is run\n args : list\n kwargs : dict\n \"\"\"\n if not isclass(cls) or not issubclass(cls, LaneTask):\n raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was '\n 'decorated with `sparklanes.Task`?' % str(cls))\n\n validate_params(cls, entry_mtd_name, *args, **kwargs)\n"
] | class Lane(object):
"""Used to build and run data processing lanes (i.e. pipelines).
Public methods are chainable."""
def __init__(self, name='UnnamedLane', run_parallel=False):
"""
Parameters
----------
name : str
Custom name of the lane
run_parallel : bool
Indicates, whether the tasks in a Lane shall be executed in parallel.
Does not affect branches inside the lane (`run_parallel` must be indicated in the
branches themselves)
"""
if not isinstance(name, string_types):
raise TypeError('`name` must be a string')
self.name = name
self.run_parallel = run_parallel
self.tasks = []
def __str__(self):
"""Generates a readable string using the tasks/branches inside the lane, i.e. builds a
string the showing the tasks and branches in a lane"""
task_str = '=' * 80 + '\n'
def generate_str(lane_or_branch, prefix='\t', out=''):
"""Recursive string generation"""
out += prefix + lane_or_branch.name
if lane_or_branch.run_parallel:
out += ' (parallel)'
out += '\n'
for task in lane_or_branch.tasks:
if isinstance(task, Branch):
out += generate_str(task, prefix + prefix[0])
elif isinstance(task['cls_or_branch'], Branch):
out += generate_str(task['cls_or_branch'], prefix + prefix[0])
else:
out += prefix + ' >' + task['cls_or_branch'].__name__ + '\n'
return out
task_str += generate_str(self) + '=' * 80
return task_str
def __validate_task(self, cls, entry_mtd_name, args, kwargs):
"""Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if
the supplied args/kwargs match the signature of the task's entry method.
Parameters
----------
cls : LaneTask
entry_mtd_name : str
Name of the method, which is called when the task is run
args : list
kwargs : dict
"""
if not isclass(cls) or not issubclass(cls, LaneTask):
raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was '
'decorated with `sparklanes.Task`?' % str(cls))
validate_params(cls, entry_mtd_name, *args, **kwargs)
def run(self):
"""Executes the tasks in the lane in the order in which they have been added, unless
`self.run_parallel` is True, then a thread is spawned for each task and executed in
parallel (note that task threads are still spawned in the order in which they were added).
"""
logger = make_default_logger(INTERNAL_LOGGER_NAME)
logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80)
logger.info('\n%s', str(self))
threads = []
if not self.tasks:
raise LaneExecutionError('No tasks to execute!')
for task_def_or_branch in self.tasks:
if isinstance(task_def_or_branch, Branch):
task_def_or_branch.run()
elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch
task_def_or_branch['cls_or_branch'].run()
else:
task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'],
**task_def_or_branch['kwargs'])
if self.run_parallel:
threads.append(LaneTaskThread(task))
else:
task()
if threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80)
return self
|
ksbg/sparklanes | sparklanes/_framework/lane.py | Lane.run | python | def run(self):
logger = make_default_logger(INTERNAL_LOGGER_NAME)
logger.info('\n%s\nExecuting `%s`\n%s\n', '-'*80, self.name, '-'*80)
logger.info('\n%s', str(self))
threads = []
if not self.tasks:
raise LaneExecutionError('No tasks to execute!')
for task_def_or_branch in self.tasks:
if isinstance(task_def_or_branch, Branch):
task_def_or_branch.run()
elif isinstance(task_def_or_branch['cls_or_branch'], Branch): # Nested Branch
task_def_or_branch['cls_or_branch'].run()
else:
task = task_def_or_branch['cls_or_branch'](*task_def_or_branch['args'],
**task_def_or_branch['kwargs'])
if self.run_parallel:
threads.append(LaneTaskThread(task))
else:
task()
if threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
logger.info('\n%s\nFinished executing `%s`\n%s', '-'*80, self.name, '-'*80)
return self | Executes the tasks in the lane in the order in which they have been added, unless
`self.run_parallel` is True, then a thread is spawned for each task and executed in
parallel (note that task threads are still spawned in the order in which they were added). | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/lane.py#L111-L146 | [
"def make_default_logger(name=INTERNAL_LOGGER_NAME, level=logging.INFO,\n fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):\n \"\"\"Create a logger with the default configuration\"\"\"\n logger = logging.getLogger(name)\n logger.setLevel(level)\n if not logger.handlers:\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(level)\n formatter = logging.Formatter(fmt)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger\n"
] | class Lane(object):
"""Used to build and run data processing lanes (i.e. pipelines).
Public methods are chainable."""
def __init__(self, name='UnnamedLane', run_parallel=False):
"""
Parameters
----------
name : str
Custom name of the lane
run_parallel : bool
Indicates, whether the tasks in a Lane shall be executed in parallel.
Does not affect branches inside the lane (`run_parallel` must be indicated in the
branches themselves)
"""
if not isinstance(name, string_types):
raise TypeError('`name` must be a string')
self.name = name
self.run_parallel = run_parallel
self.tasks = []
def __str__(self):
"""Generates a readable string using the tasks/branches inside the lane, i.e. builds a
string the showing the tasks and branches in a lane"""
task_str = '=' * 80 + '\n'
def generate_str(lane_or_branch, prefix='\t', out=''):
"""Recursive string generation"""
out += prefix + lane_or_branch.name
if lane_or_branch.run_parallel:
out += ' (parallel)'
out += '\n'
for task in lane_or_branch.tasks:
if isinstance(task, Branch):
out += generate_str(task, prefix + prefix[0])
elif isinstance(task['cls_or_branch'], Branch):
out += generate_str(task['cls_or_branch'], prefix + prefix[0])
else:
out += prefix + ' >' + task['cls_or_branch'].__name__ + '\n'
return out
task_str += generate_str(self) + '=' * 80
return task_str
def __validate_task(self, cls, entry_mtd_name, args, kwargs):
"""Checks if a class is a task, i.e. if it has been decorated with `sparklanes.Task`, and if
the supplied args/kwargs match the signature of the task's entry method.
Parameters
----------
cls : LaneTask
entry_mtd_name : str
Name of the method, which is called when the task is run
args : list
kwargs : dict
"""
if not isclass(cls) or not issubclass(cls, LaneTask):
raise TypeError('Tried to add non-Task `%s` to a Lane. Are you sure the task was '
'decorated with `sparklanes.Task`?' % str(cls))
validate_params(cls, entry_mtd_name, *args, **kwargs)
def add(self, cls_or_branch, *args, **kwargs):
"""Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining
"""
if isinstance(cls_or_branch, Branch):
self.tasks.append(cls_or_branch) # Add branch with already validated tasks
else:
# Validate
self.__validate_task(cls_or_branch, '__init__', args, kwargs)
# Append
self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
return self
|
ksbg/sparklanes | sparklanes/_framework/task.py | Task | python | def Task(entry): # pylint: disable=invalid-name
if not isinstance(entry, string_types):
# In the event that no argument is supplied to the decorator, python passes the decorated
# class itself as an argument. That way, we can detect if no argument (or an argument of
# invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and
# as an arg. Isn't neat, but for now it suffices.
raise TypeError('When decorating a class with `Task`, a single string argument must be '
'supplied, which specifies the "main" task method, i.e. the class\'s entry '
'point to the task.')
else:
def wrapper(cls):
"""The actual decorator function"""
if isclass(cls):
if not hasattr(cls, entry): # Check if cls has the specified entry method
raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__))
# We will have to inspect the task class's `__init__` method later (by inspecting
# the arg signature, before it is instantiated). In various circumstances, classes
# will not have an unbound `__init__` method. Let's deal with that now already, by
# assigning an empty, unbound `__init__` method manually, in order to prevent
# errors later on during method inspection (not an issue in Python 3):
# - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a
# sub-class of object, and it does not have a `__init__` method definition, the
# class will not have an attribute `__init__`
# - If a class misses a `__init__` method definition, but is defined as a
# new-style class, attribute `__init__` will be of type `slot wrapper`, which
# cannot be inspected (and it also doesn't seem possible to check if a method is of
# type `slot wrapper`, which is why we manually define one).
if not hasattr(cls, '__init__') or cls.__init__ == object.__init__:
init = MethodType(lambda self: None, None, cls) \
if PY2 else MethodType(lambda self: None, cls)
setattr(cls, '__init__', init)
# Check for attributes that will be overwritten, in order to warn the user
reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache',
'clear_cache', '_log_lock')
for attr in dir(cls):
if attr in reserved_attributes:
make_default_logger(INTERNAL_LOGGER_NAME).warning(
'Attribute `%s` of class `%s` will be overwritten when decorated with '
'`sparklanes.Task`! Avoid assigning any of the following attributes '
'`%s`', attr, cls.__name__, str(reserved_attributes)
)
assignments = {'_entry_mtd': entry,
'__getattr__': lambda self, name: TaskCache.get(name),
'__init__': cls.__init__,
'_log_lock': Lock()}
for attr in WRAPPER_ASSIGNMENTS:
try:
assignments[attr] = getattr(cls, attr)
except AttributeError:
pass
# Build task as a subclass of LaneTask
return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments)
else:
raise TypeError('Only classes can be decorated with `Task`')
return wrapper | Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is
being decorated, it becomes a child of `LaneTask`.
Parameters
----------
entry: The name of the task's "main" method, i.e. the method which is executed when task is run
Returns
-------
wrapper (function): The actual decorator function | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L18-L89 | null | """Includes the `Task` decorator, the parent class `LaneTask` from which all tasks will inherit,
as well as the `_TaskCache`, which is used to share attributes between Task objects."""
import sys
from datetime import timedelta
from functools import WRAPPER_ASSIGNMENTS
from inspect import isclass
from threading import Thread, Lock
from time import time
from types import MethodType
from six import string_types, PY2, PY3
from .env import INTERNAL_LOGGER_NAME
from .errors import CacheError, TaskInitializationError, LaneExecutionError
from .log import make_default_logger
class LaneTask(object):
"""The super class of each task, from which all tasks inherit when being decorated with
`sparklanes.Task`"""
# pylint: disable=no-member
def __new__(cls, *args, **kwargs):
"""Used to make sure the class will not be instantiated on its own. Instances of LaneTask
should only exist as parents."""
if cls is LaneTask:
raise TaskInitializationError(
"Task base `LaneTask` may not be instantiated on its own.")
return object.__new__(cls, *args, **kwargs) if PY2 else object.__new__(cls)
def __call__(self):
"""Used to make each task object callable, in order to execute tasks in a consistent
manner. Calls the task's entry method and provides some logging."""
logger = make_default_logger(INTERNAL_LOGGER_NAME)
task_name = self.__name__ + '.' + self._entry_mtd
logger.info('\n%s\nExecuting task `%s`\n%s',
'-'*80, task_name, '-'*80)
start = time()
res = getattr(self, self._entry_mtd)()
passed = str(timedelta(seconds=(time() - start)))
logger.info('\n%s\nFinished executing task `%s`. Execution time: %s\n%s',
'-'*80, task_name, passed, '-'*80)
return res
def cache(self, name, val, overwrite=True):
"""Assigns an attribute reference to all subsequent tasks. For example, if a task caches a
DataFrame `df` using `self.cache('some_df', df)`, all tasks that follow can access the
DataFrame using `self.some_df`. Note that manually assigned attributes that share the same
name have precedence over cached attributes.
Parameters
----------
name : str
Name of the attribute
val
Attribute value
overwrite : bool
Indicates if the attribute shall be overwritten, or not (if `False`, and
a cached attribute with the given name already exists, `sparklanes.errors.CacheError`
will be thrown).
"""
if name in TaskCache.cached and not overwrite:
raise CacheError('Object with name `%s` already in cache.' % name)
TaskCache.cached[name] = val
def uncache(self, name):
"""Removes an attribute from the cache, i.e. it will be deleted and becomes unavailable for
all subsequent tasks.
Parameters
----------
name : str
Name of the cached attribute, which shall be deleted
"""
try:
del TaskCache.cached[name]
except KeyError:
raise CacheError('Attribute `%s` not found in cache.' % name)
def clear_cache(self):
"""Clears the entire cache"""
TaskCache.cached = {}
class LaneTaskThread(Thread):
"""Used to spawn tasks as threads to be run in parallel."""
def __init__(self, task):
Thread.__init__(self)
self.task = task
self.exc = None
self.daemon = True
def run(self):
"""Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads
from within the main app."""
self.exc = None
try:
self.task()
except BaseException:
self.exc = sys.exc_info()
def join(self, timeout=None):
"""Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
from within the main app."""
Thread.join(self, timeout=timeout)
if self.exc:
msg = "Thread '%s' threw an exception `%s`: %s" \
% (self.getName(), self.exc[0].__name__, self.exc[1])
new_exc = LaneExecutionError(msg)
if PY3:
raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member
else:
raise (new_exc.__class__, new_exc, self.exc[2]) # pylint: disable=raising-bad-type
class TaskCache(object):
"""Serves as the attribute cache of tasks, which is accessed using the tasks'
`__getattr__` method."""
cached = {}
def __new__(cls, *args, **kwargs):
"""Used to make sure that TaskCache will not be instantiated."""
if cls is TaskCache:
raise CacheError("`TaskCache` may not be instantiated and only provides static access.")
return object.__new__(cls, *args, **kwargs) if PY2 else object.__new__(cls)
@staticmethod
def get(name):
"""Retrieves an object from the cache.
Parameters
----------
name : str
Name of the object to be retrieved
Returns
-------
object
"""
try:
return TaskCache.cached[name]
except KeyError:
raise AttributeError('Attribute `%s` not found in cache or object.' % name)
|
ksbg/sparklanes | sparklanes/_framework/task.py | LaneTask.cache | python | def cache(self, name, val, overwrite=True):
if name in TaskCache.cached and not overwrite:
raise CacheError('Object with name `%s` already in cache.' % name)
TaskCache.cached[name] = val | Assigns an attribute reference to all subsequent tasks. For example, if a task caches a
DataFrame `df` using `self.cache('some_df', df)`, all tasks that follow can access the
DataFrame using `self.some_df`. Note that manually assigned attributes that share the same
name have precedence over cached attributes.
Parameters
----------
name : str
Name of the attribute
val
Attribute value
overwrite : bool
Indicates if the attribute shall be overwritten, or not (if `False`, and
a cached attribute with the given name already exists, `sparklanes.errors.CacheError`
will be thrown). | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L121-L140 | null | class LaneTask(object):
"""The super class of each task, from which all tasks inherit when being decorated with
`sparklanes.Task`"""
# pylint: disable=no-member
def __new__(cls, *args, **kwargs):
"""Used to make sure the class will not be instantiated on its own. Instances of LaneTask
should only exist as parents."""
if cls is LaneTask:
raise TaskInitializationError(
"Task base `LaneTask` may not be instantiated on its own.")
return object.__new__(cls, *args, **kwargs) if PY2 else object.__new__(cls)
def __call__(self):
"""Used to make each task object callable, in order to execute tasks in a consistent
manner. Calls the task's entry method and provides some logging."""
logger = make_default_logger(INTERNAL_LOGGER_NAME)
task_name = self.__name__ + '.' + self._entry_mtd
logger.info('\n%s\nExecuting task `%s`\n%s',
'-'*80, task_name, '-'*80)
start = time()
res = getattr(self, self._entry_mtd)()
passed = str(timedelta(seconds=(time() - start)))
logger.info('\n%s\nFinished executing task `%s`. Execution time: %s\n%s',
'-'*80, task_name, passed, '-'*80)
return res
def uncache(self, name):
"""Removes an attribute from the cache, i.e. it will be deleted and becomes unavailable for
all subsequent tasks.
Parameters
----------
name : str
Name of the cached attribute, which shall be deleted
"""
try:
del TaskCache.cached[name]
except KeyError:
raise CacheError('Attribute `%s` not found in cache.' % name)
def clear_cache(self):
"""Clears the entire cache"""
TaskCache.cached = {}
|
ksbg/sparklanes | sparklanes/_framework/task.py | LaneTaskThread.run | python | def run(self):
self.exc = None
try:
self.task()
except BaseException:
self.exc = sys.exc_info() | Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads
from within the main app. | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L170-L177 | null | class LaneTaskThread(Thread):
"""Used to spawn tasks as threads to be run in parallel."""
def __init__(self, task):
Thread.__init__(self)
self.task = task
self.exc = None
self.daemon = True
def join(self, timeout=None):
"""Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
from within the main app."""
Thread.join(self, timeout=timeout)
if self.exc:
msg = "Thread '%s' threw an exception `%s`: %s" \
% (self.getName(), self.exc[0].__name__, self.exc[1])
new_exc = LaneExecutionError(msg)
if PY3:
raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member
else:
raise (new_exc.__class__, new_exc, self.exc[2]) # pylint: disable=raising-bad-type
|
ksbg/sparklanes | sparklanes/_framework/task.py | LaneTaskThread.join | python | def join(self, timeout=None):
Thread.join(self, timeout=timeout)
if self.exc:
msg = "Thread '%s' threw an exception `%s`: %s" \
% (self.getName(), self.exc[0].__name__, self.exc[1])
new_exc = LaneExecutionError(msg)
if PY3:
raise new_exc.with_traceback(self.exc[2]) # pylint: disable=no-member
else:
raise (new_exc.__class__, new_exc, self.exc[2]) | Overwrites `threading.Thread.join`, to allow handling of exceptions thrown by threads
from within the main app. | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/task.py#L179-L191 | null | class LaneTaskThread(Thread):
"""Used to spawn tasks as threads to be run in parallel."""
def __init__(self, task):
Thread.__init__(self)
self.task = task
self.exc = None
self.daemon = True
def run(self):
"""Overwrites `threading.Thread.run`, to allow handling of exceptions thrown by threads
from within the main app."""
self.exc = None
try:
self.task()
except BaseException:
self.exc = sys.exc_info()
# pylint: disable=raising-bad-type
|
ksbg/sparklanes | sparklanes/_framework/spark.py | SparkContextAndSessionContainer.set_sc | python | def set_sc(cls, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None,
batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None,
profiler_cls=BasicProfiler):
if cls.sc is not None:
cls.sc.stop()
cls.sc = SparkContext(master, appName, sparkHome, pyFiles, environment, batchSize,
serializer,
conf, gateway, jsc, profiler_cls)
cls.__init_spark() | Creates and initializes a new `SparkContext` (the old one will be stopped).
Argument signature is copied from `pyspark.SparkContext
<https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_. | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/spark.py#L24-L36 | [
"def __init_spark(cls):\n cls.spark = SparkSession.builder.appName(SPARK_APP_NAME).getOrCreate()\n"
] | class SparkContextAndSessionContainer(object):
"""Container class holding SparkContext and SparkSession instances, so that any changes
will be propagated across the application"""
sc = None
spark = None
def __new__(cls, *args, **kwargs):
if cls is SparkContextAndSessionContainer:
raise TypeError('SparkSession & SparkContext container class may not be instantiated.')
return object.__new__(cls, *args, **kwargs) if PY2 else object.__new__(cls)
@classmethod
@classmethod
def set_spark(cls, master=None, appName=None, conf=None, hive_support=False):
"""Creates and initializes a new `SparkSession`. Argument signature is copied from
`pyspark.sql.SparkSession
<https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_.
"""
sess = SparkSession.builder
if master:
sess.master(master)
if appName:
sess.appName(appName)
if conf:
sess.config(conf=conf)
if hive_support:
sess.enableHiveSupport()
cls.spark = sess.getOrCreate()
@classmethod
def init_default(cls):
"""Create and initialize a default SparkContext and SparkSession"""
cls.__init_sc()
cls.__init_spark()
@classmethod
def __init_sc(cls):
cls.sc = SparkContext(appName=SPARK_APP_NAME).getOrCreate()
@classmethod
def __init_spark(cls):
cls.spark = SparkSession.builder.appName(SPARK_APP_NAME).getOrCreate()
|
ksbg/sparklanes | sparklanes/_framework/spark.py | SparkContextAndSessionContainer.set_spark | python | def set_spark(cls, master=None, appName=None, conf=None, hive_support=False):
sess = SparkSession.builder
if master:
sess.master(master)
if appName:
sess.appName(appName)
if conf:
sess.config(conf=conf)
if hive_support:
sess.enableHiveSupport()
cls.spark = sess.getOrCreate() | Creates and initializes a new `SparkSession`. Argument signature is copied from
`pyspark.sql.SparkSession
<https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.SparkSession>`_. | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/spark.py#L39-L54 | null | class SparkContextAndSessionContainer(object):
"""Container class holding SparkContext and SparkSession instances, so that any changes
will be propagated across the application"""
sc = None
spark = None
def __new__(cls, *args, **kwargs):
if cls is SparkContextAndSessionContainer:
raise TypeError('SparkSession & SparkContext container class may not be instantiated.')
return object.__new__(cls, *args, **kwargs) if PY2 else object.__new__(cls)
@classmethod
def set_sc(cls, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None,
batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None,
profiler_cls=BasicProfiler):
"""Creates and initializes a new `SparkContext` (the old one will be stopped).
Argument signature is copied from `pyspark.SparkContext
<https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext>`_.
"""
if cls.sc is not None:
cls.sc.stop()
cls.sc = SparkContext(master, appName, sparkHome, pyFiles, environment, batchSize,
serializer,
conf, gateway, jsc, profiler_cls)
cls.__init_spark()
@classmethod
@classmethod
def init_default(cls):
"""Create and initialize a default SparkContext and SparkSession"""
cls.__init_sc()
cls.__init_spark()
@classmethod
def __init_sc(cls):
cls.sc = SparkContext(appName=SPARK_APP_NAME).getOrCreate()
@classmethod
def __init_spark(cls):
cls.spark = SparkSession.builder.appName(SPARK_APP_NAME).getOrCreate()
|
ksbg/sparklanes | sparklanes/_submit/submit.py | _package_and_submit | python | def _package_and_submit(args):
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist) | Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L19-L47 | [
"def _parse_and_validate_args(args):\n \"\"\"\n Parse and validate arguments. During validation, it is checked whether the given\n files/directories exist, while also converting relative paths to absolute ones.\n\n Parameters\n ----------\n args (List): Command-line arguments\n \"\"\"\n class ExtendAction(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n if getattr(namespace, self.dest, None) is None:\n setattr(namespace, self.dest, [])\n getattr(namespace, self.dest).extend(values)\n\n parser = argparse.ArgumentParser(description='Submitting a lane to spark.')\n parser.add_argument('-y', '--yaml', type=str, required=True,\n help='Path to the yaml definition file.')\n parser.add_argument('-p', '--package', type=str, required=True,\n help='Path to the python package containing your tasks.')\n parser.add_argument('-r', '--requirements', type=str, required=False,\n help='Path to a `requirements.txt` specifying any additional dependencies '\n 'of your tasks.')\n parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,\n help='Path to any additional files or directories that should be packaged '\n 'and sent to Spark.')\n parser.add_argument('-m', '--main', type=str, required=False,\n help='Path to a custom main python file')\n parser.add_argument('-d', '--spark-home', type=str, required=False,\n help='Custom path to the directory containing your Spark installation. If '\n 'none is given, sparklanes will try to use the `spark-submit` command '\n 'from your PATH')\n parser.add_argument('-s', '--spark-args', nargs='*', required=False,\n help='Any additional arguments that should be sent to Spark via '\n 'spark-submit. '\n '(e.g. 
`--spark-args executor-memory=20G total-executor-cores=100`)')\n parser.add_argument('--silent', help='If set, no output will be sent to console',\n action='store_true')\n args = parser.parse_args(args).__dict__\n\n # Check/fix files/dirs\n for param in ('package', 'spark_home'):\n args[param] = __validate_and_fix_path(args[param], check_dir=True)\n for param in ('yaml', 'requirements', 'main'):\n args[param] = __validate_and_fix_path(args[param], check_file=True)\n if args['extra_data']:\n for i in range(len(args['extra_data'])):\n args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],\n check_file=True, check_dir=True)\n\n # Check if python package\n if not os.path.isfile(os.path.join(args['package'], '__init__.py')):\n raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '\n '`__init__.py`.')\n\n # Check/fix spark args\n if args['spark_args']:\n args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])\n\n return args\n",
"def __make_tmp_dir():\n \"\"\"\n Create a temporary directory where the packaged files will be located\n\n Returns\n -------\n tmp_dir (str): Absolute path to temporary directory\n \"\"\"\n tmp_dir = tempfile.mkdtemp()\n logging.debug('Created temporary dir: `%s`', tmp_dir)\n\n return tmp_dir\n",
"def __package_dependencies(dist_dir, additional_reqs, silent):\n \"\"\"\n Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.\n\n Parameters\n ----------\n dist_dir (str): Path to directory where the packaged libs shall be located\n additional_reqs (str): Path to a requirements.txt, containing any of the app's additional\n requirements\n silent (bool): Flag indicating whether pip output should be printed to console\n \"\"\"\n logging.info('Packaging dependencies')\n libs_dir = os.path.join(dist_dir, 'libs')\n if not os.path.isdir(libs_dir):\n os.mkdir(libs_dir)\n\n # Get requirements\n req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')\n with open(req_txt, 'r') as req:\n requirements = req.read().splitlines()\n if additional_reqs:\n with open(additional_reqs, 'r') as req:\n for row in req:\n requirements.append(row)\n\n # Remove duplicates\n requirements = list(set(requirements))\n\n # Install\n devnull = open(os.devnull, 'w')\n outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}\n for pkg in requirements:\n cmd = ['pip', 'install', pkg, '-t', libs_dir]\n logging.debug('Calling `%s`', str(cmd))\n call(cmd, **outp)\n devnull.close()\n\n # Package\n shutil.make_archive(libs_dir, 'zip', libs_dir, './')\n",
"def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):\n \"\"\"\n Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to\n `dist_dir`, to be submitted to spark. Same for `extra_data`.\n\n Parameters\n ----------\n tasks_pkg (str): Path to the python package containing tasks\n dist_dir (str): Path to the directory where the packaged code should be stored\n custom_main (str): Path to a custom 'main' python file.\n extra_data (List[str]): List containing paths to files/directories that should also be packaged\n and submitted to spark\n \"\"\"\n logging.info('Packaging application')\n\n # Package tasks\n tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))\n shutil.make_archive(os.path.join(dist_dir, 'tasks'),\n 'zip',\n tasks_dir_splits[0],\n tasks_dir_splits[1])\n\n # Package main.py\n if custom_main is None:\n from . import _main\n main_path = _main.__file__\n if main_path[-3:] == 'pyc':\n main_path = main_path[:-1]\n shutil.copy(os.path.realpath(main_path),\n os.path.join(dist_dir, 'main.py'))\n else:\n shutil.copy(os.path.realpath(custom_main),\n os.path.join(dist_dir, 'main.py'))\n\n # Package _framework\n shutil.make_archive(os.path.join(dist_dir, '_framework'),\n 'zip',\n os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),\n './sparklanes/')\n\n # Package extra data\n if extra_data:\n for dat in extra_data:\n real_path = os.path.realpath(dat)\n target = os.path.join(dist_dir, os.path.split(real_path)[1])\n if os.path.isfile(real_path):\n shutil.copy(real_path, target)\n elif os.path.isdir(real_path):\n shutil.copytree(real_path, target)\n else:\n raise IOError('File `%s` not found at `%s`.' % (dat, real_path))\n",
"def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):\n \"\"\"\n Submits the packaged application to spark using a `spark-submit` subprocess\n\n Parameters\n ----------\n lane_yaml (str): Path to the YAML lane definition file\n dist_dir (str): Path to the directory where the packaged code is located\n spark_args (str): String of any additional spark config args to be passed when submitting\n silent (bool): Flag indicating whether job output should be printed to console\n \"\"\"\n # spark-submit binary\n cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]\n\n # Supplied spark arguments\n if spark_args:\n cmd += spark_args\n\n # Packaged App & lane\n cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']\n cmd += ['--lane', lane_yaml]\n\n logging.info('Submitting to Spark')\n logging.debug(str(cmd))\n\n # Submit\n devnull = open(os.devnull, 'w')\n outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}\n call(cmd, cwd=dist_dir, env=MY_ENV, **outp)\n devnull.close()\n",
"def __clean_up(dist_dir):\n \"\"\"Delete packaged app\"\"\"\n shutil.rmtree(dist_dir)\n"
] | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _parse_and_validate_args(args):
"""
Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments
"""
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.')
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args
def __validate_and_fix_path(path, check_file=False, check_dir=False):
"""Check if a file/directory exists and converts relative paths to absolute ones"""
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args
def __make_tmp_dir():
"""
Create a temporary directory where the packaged files will be located
Returns
-------
tmp_dir (str): Absolute path to temporary directory
"""
tmp_dir = tempfile.mkdtemp()
logging.debug('Created temporary dir: `%s`', tmp_dir)
return tmp_dir
def __package_dependencies(dist_dir, additional_reqs, silent):
"""
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console
"""
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './')
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
"""
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
"""
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
def __clean_up(dist_dir):
"""Delete packaged app"""
shutil.rmtree(dist_dir)
|
ksbg/sparklanes | sparklanes/_submit/submit.py | _parse_and_validate_args | python | def _parse_and_validate_args(args):
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.')
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args | Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L50-L109 | [
"def __validate_and_fix_path(path, check_file=False, check_dir=False):\n \"\"\"Check if a file/directory exists and converts relative paths to absolute ones\"\"\"\n # pylint: disable=superfluous-parens\n if path is None:\n return path\n else:\n if not (os.path.isfile(path) if check_file else False) \\\n and not (os.path.isdir(path) if check_dir else False):\n raise SystemExit('Path `%s` does not exist' % path)\n if not os.path.isabs(path):\n path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))\n\n return path\n"
] | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
def __validate_and_fix_path(path, check_file=False, check_dir=False):
"""Check if a file/directory exists and converts relative paths to absolute ones"""
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args
def __make_tmp_dir():
"""
Create a temporary directory where the packaged files will be located
Returns
-------
tmp_dir (str): Absolute path to temporary directory
"""
tmp_dir = tempfile.mkdtemp()
logging.debug('Created temporary dir: `%s`', tmp_dir)
return tmp_dir
def __package_dependencies(dist_dir, additional_reqs, silent):
"""
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console
"""
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './')
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
"""
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
"""
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
def __clean_up(dist_dir):
"""Delete packaged app"""
shutil.rmtree(dist_dir)
|
ksbg/sparklanes | sparklanes/_submit/submit.py | __validate_and_fix_path | python | def __validate_and_fix_path(path, check_file=False, check_dir=False):
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path | Check if a file/directory exists and converts relative paths to absolute ones | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L112-L124 | null | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
def _parse_and_validate_args(args):
"""
Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments
"""
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.')
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args
def __make_tmp_dir():
"""
Create a temporary directory where the packaged files will be located
Returns
-------
tmp_dir (str): Absolute path to temporary directory
"""
tmp_dir = tempfile.mkdtemp()
logging.debug('Created temporary dir: `%s`', tmp_dir)
return tmp_dir
def __package_dependencies(dist_dir, additional_reqs, silent):
"""
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console
"""
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './')
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
"""
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
"""
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
def __clean_up(dist_dir):
"""Delete packaged app"""
shutil.rmtree(dist_dir)
|
ksbg/sparklanes | sparklanes/_submit/submit.py | __validate_and_fix_spark_args | python | def __validate_and_fix_spark_args(spark_args):
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args | Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L127-L155 | null | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
def _parse_and_validate_args(args):
"""
Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments
"""
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.')
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args
def __validate_and_fix_path(path, check_file=False, check_dir=False):
"""Check if a file/directory exists and converts relative paths to absolute ones"""
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path
def __make_tmp_dir():
"""
Create a temporary directory where the packaged files will be located
Returns
-------
tmp_dir (str): Absolute path to temporary directory
"""
tmp_dir = tempfile.mkdtemp()
logging.debug('Created temporary dir: `%s`', tmp_dir)
return tmp_dir
def __package_dependencies(dist_dir, additional_reqs, silent):
"""
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console
"""
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './')
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
"""
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
"""
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
def __clean_up(dist_dir):
"""Delete packaged app"""
shutil.rmtree(dist_dir)
|
ksbg/sparklanes | sparklanes/_submit/submit.py | __package_dependencies | python | def __package_dependencies(dist_dir, additional_reqs, silent):
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './') | Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L172-L210 | null | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
def _parse_and_validate_args(args):
"""
Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments
"""
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.')
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args
def __validate_and_fix_path(path, check_file=False, check_dir=False):
"""Check if a file/directory exists and converts relative paths to absolute ones"""
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args
def __make_tmp_dir():
"""
Create a temporary directory where the packaged files will be located
Returns
-------
tmp_dir (str): Absolute path to temporary directory
"""
tmp_dir = tempfile.mkdtemp()
logging.debug('Created temporary dir: `%s`', tmp_dir)
return tmp_dir
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
"""
Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark
"""
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
def __clean_up(dist_dir):
"""Delete packaged app"""
shutil.rmtree(dist_dir)
|
ksbg/sparklanes | sparklanes/_submit/submit.py | __package_app | python | def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
logging.info('Packaging application')
# Package tasks
tasks_dir_splits = os.path.split(os.path.realpath(tasks_pkg))
shutil.make_archive(os.path.join(dist_dir, 'tasks'),
'zip',
tasks_dir_splits[0],
tasks_dir_splits[1])
# Package main.py
if custom_main is None:
from . import _main
main_path = _main.__file__
if main_path[-3:] == 'pyc':
main_path = main_path[:-1]
shutil.copy(os.path.realpath(main_path),
os.path.join(dist_dir, 'main.py'))
else:
shutil.copy(os.path.realpath(custom_main),
os.path.join(dist_dir, 'main.py'))
# Package _framework
shutil.make_archive(os.path.join(dist_dir, '_framework'),
'zip',
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
'./sparklanes/')
# Package extra data
if extra_data:
for dat in extra_data:
real_path = os.path.realpath(dat)
target = os.path.join(dist_dir, os.path.split(real_path)[1])
if os.path.isfile(real_path):
shutil.copy(real_path, target)
elif os.path.isdir(real_path):
shutil.copytree(real_path, target)
else:
raise IOError('File `%s` not found at `%s`.' % (dat, real_path)) | Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
`dist_dir`, to be submitted to spark. Same for `extra_data`.
Parameters
----------
tasks_pkg (str): Path to the python package containing tasks
dist_dir (str): Path to the directory where the packaged code should be stored
custom_main (str): Path to a custom 'main' python file.
extra_data (List[str]): List containing paths to files/directories that should also be packaged
and submitted to spark | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L213-L263 | null | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _package_and_submit(args):
    """
    Packages and submits a job, which is defined in a YAML file, to Spark.

    Parameters
    ----------
    args (List): Command-line arguments
    """
    args = _parse_and_validate_args(args)
    logging.debug(args)
    dist = __make_tmp_dir()
    try:
        __package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
                               silent=args['silent'])
        __package_app(tasks_pkg=args['package'],
                      dist_dir=dist,
                      custom_main=args['main'],
                      extra_data=args['extra_data'])
        __run_spark_submit(lane_yaml=args['yaml'],
                           dist_dir=dist,
                           spark_home=args['spark_home'],
                           spark_args=args['spark_args'],
                           silent=args['silent'])
    finally:
        # Always remove the temporary dist dir, on success and on failure.
        # (Replaces the original duplicated cleanup + `raise exc`, which reset
        # the traceback origin; a bare re-raise via `finally` preserves it.)
        __clean_up(dist)
def _parse_and_validate_args(args):
    """
    Parse and validate arguments. During validation, it is checked whether the given
    files/directories exist, while also converting relative paths to absolute ones.

    Parameters
    ----------
    args (List): Command-line arguments

    Returns
    -------
    dict: Parsed and validated arguments, keyed by argument name.
    """
    class ExtendAction(argparse.Action):
        """`append`-like action that extends the destination list with all values."""
        def __call__(self, parser, namespace, values, option_string=None):
            if getattr(namespace, self.dest, None) is None:
                setattr(namespace, self.dest, [])
            getattr(namespace, self.dest).extend(values)

    parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
    parser.add_argument('-y', '--yaml', type=str, required=True,
                        help='Path to the yaml definition file.')
    parser.add_argument('-p', '--package', type=str, required=True,
                        help='Path to the python package containing your tasks.')
    parser.add_argument('-r', '--requirements', type=str, required=False,
                        help='Path to a `requirements.txt` specifying any additional dependencies '
                             'of your tasks.')
    parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
                        help='Path to any additional files or directories that should be packaged '
                             'and sent to Spark.')
    parser.add_argument('-m', '--main', type=str, required=False,
                        help='Path to a custom main python file')
    parser.add_argument('-d', '--spark-home', type=str, required=False,
                        help='Custom path to the directory containing your Spark installation. If '
                             'none is given, sparklanes will try to use the `spark-submit` command '
                             'from your PATH')
    parser.add_argument('-s', '--spark-args', nargs='*', required=False,
                        help='Any additional arguments that should be sent to Spark via '
                             'spark-submit. '
                             '(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
    parser.add_argument('--silent', help='If set, no output will be sent to console',
                        action='store_true')
    args = parser.parse_args(args).__dict__

    # Check/fix files/dirs (relative paths become absolute)
    for param in ('package', 'spark_home'):
        args[param] = __validate_and_fix_path(args[param], check_dir=True)
    for param in ('yaml', 'requirements', 'main'):
        args[param] = __validate_and_fix_path(args[param], check_file=True)
    if args['extra_data']:
        args['extra_data'] = [__validate_and_fix_path(path, check_file=True, check_dir=True)
                              for path in args['extra_data']]

    # Check if python package
    if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
        # Bug fix: the original message contained a `%s` placeholder but never
        # supplied the value, so the literal text "%s" was printed.
        raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
                         '`__init__.py`.' % args['package'])

    # Check/fix spark args
    if args['spark_args']:
        args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])

    return args
def __validate_and_fix_path(path, check_file=False, check_dir=False):
    """Validate that `path` exists and return it as an absolute path.

    `None` is passed through unchanged (the argument was simply not given).
    Existence is checked as a file and/or as a directory, depending on the
    `check_file`/`check_dir` flags; failing both checks aborts the program.
    """
    if path is None:
        return path
    exists_as_file = os.path.isfile(path) if check_file else False
    exists_as_dir = os.path.isdir(path) if check_dir else False
    if not exists_as_file and not exists_as_dir:
        raise SystemExit('Path `%s` does not exist' % path)
    if not os.path.isabs(path):
        path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
    return path
def __validate_and_fix_spark_args(spark_args):
    """Translate CLI-style spark arguments into spark-submit flags.

    An input such as ``['master=local[4]', 'verbose']`` becomes
    ``['--master', 'local[4]', '--verbose']``. Entries must either be one of
    the known flags in ``SPARK_SUBMIT_FLAGS`` or match ``NAME=VALUE``;
    anything else aborts the program.

    Parameters
    ----------
    spark_args (List): Raw spark arguments as given on the command line

    Returns
    -------
    List: Arguments in the form spark-submit expects
    """
    arg_pattern = re.compile(r'[\w\-_]+=.+')
    prepared = []
    for raw in spark_args:
        if raw in SPARK_SUBMIT_FLAGS:
            prepared.append('--' + raw)
            continue
        if not arg_pattern.match(raw):
            raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
                             '`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
                             'valid spark-submit flags (%s).' % (raw, str(SPARK_SUBMIT_FLAGS)))
        key, _, value = raw.partition('=')
        prepared.append('--' + key)
        prepared.append(value)
    return prepared
def __make_tmp_dir():
    """Create a temporary working directory for the packaged job files.

    Returns
    -------
    str
        Absolute path of the newly created directory.
    """
    path = tempfile.mkdtemp()
    logging.debug('Created temporary dir: `%s`', path)
    return path
def __package_dependencies(dist_dir, additional_reqs, silent):
    """
    Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.

    Parameters
    ----------
    dist_dir (str): Path to directory where the packaged libs shall be located
    additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
                           requirements
    silent (bool): Flag indicating whether pip output should be printed to console
    """
    logging.info('Packaging dependencies')
    libs_dir = os.path.join(dist_dir, 'libs')
    if not os.path.isdir(libs_dir):
        os.mkdir(libs_dir)

    # Collect framework requirements plus any user-supplied ones
    req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
    with open(req_txt, 'r') as req:
        requirements = req.read().splitlines()
    if additional_reqs:
        with open(additional_reqs, 'r') as req:
            for row in req:
                requirements.append(row)
    # Remove duplicates
    requirements = list(set(requirements))

    # Install each requirement into libs_dir. The `with` block fixes a handle
    # leak in the original, where devnull stayed open if `call` raised.
    with open(os.devnull, 'w') as devnull:
        outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
        for pkg in requirements:
            cmd = ['pip', 'install', pkg, '-t', libs_dir]
            logging.debug('Calling `%s`', str(cmd))
            call(cmd, **outp)

    # Package
    shutil.make_archive(libs_dir, 'zip', libs_dir, './')
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
    """
    Submits the packaged application to spark using a `spark-submit` subprocess

    Parameters
    ----------
    lane_yaml (str): Path to the YAML lane definition file
    dist_dir (str): Path to the directory where the packaged code is located
    spark_home (str): Path to a Spark installation, or None to use `spark-submit` from PATH
    spark_args (List[str]): Any additional spark config args to be passed when submitting
    silent (bool): Flag indicating whether job output should be printed to console
    """
    # spark-submit binary
    cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
    # Supplied spark arguments
    if spark_args:
        cmd += spark_args
    # Packaged App & lane
    cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
    cmd += ['--lane', lane_yaml]

    logging.info('Submitting to Spark')
    logging.debug(str(cmd))

    # Submit. The `with` block fixes a handle leak in the original, where
    # devnull stayed open if `call` raised.
    with open(os.devnull, 'w') as devnull:
        outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
        call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
def __clean_up(dist_dir):
    """Delete the temporary distribution directory `dist_dir` and all of its contents."""
    shutil.rmtree(dist_dir)
|
ksbg/sparklanes | sparklanes/_submit/submit.py | __run_spark_submit | python | def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close() | Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L266-L295 | null | """Module that allows submitting lanes to spark using YAML definitions"""
import argparse
import logging
import os
import re
import shutil
import sys
import tempfile
from subprocess import call, STDOUT
SPARK_SUBMIT_FLAGS = ['verbose', 'supervised']
MY_ENV = os.environ.copy()
def submit_to_spark():
"""Console-script entry point"""
_package_and_submit(sys.argv[1:])
def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist)
def _parse_and_validate_args(args):
"""
Parse and validate arguments. During validation, it is checked whether the given
files/directories exist, while also converting relative paths to absolute ones.
Parameters
----------
args (List): Command-line arguments
"""
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
getattr(namespace, self.dest).extend(values)
parser = argparse.ArgumentParser(description='Submitting a lane to spark.')
parser.add_argument('-y', '--yaml', type=str, required=True,
help='Path to the yaml definition file.')
parser.add_argument('-p', '--package', type=str, required=True,
help='Path to the python package containing your tasks.')
parser.add_argument('-r', '--requirements', type=str, required=False,
help='Path to a `requirements.txt` specifying any additional dependencies '
'of your tasks.')
parser.add_argument('-e', '--extra-data', nargs='*', required=False, action=ExtendAction,
help='Path to any additional files or directories that should be packaged '
'and sent to Spark.')
parser.add_argument('-m', '--main', type=str, required=False,
help='Path to a custom main python file')
parser.add_argument('-d', '--spark-home', type=str, required=False,
help='Custom path to the directory containing your Spark installation. If '
'none is given, sparklanes will try to use the `spark-submit` command '
'from your PATH')
parser.add_argument('-s', '--spark-args', nargs='*', required=False,
help='Any additional arguments that should be sent to Spark via '
'spark-submit. '
'(e.g. `--spark-args executor-memory=20G total-executor-cores=100`)')
parser.add_argument('--silent', help='If set, no output will be sent to console',
action='store_true')
args = parser.parse_args(args).__dict__
# Check/fix files/dirs
for param in ('package', 'spark_home'):
args[param] = __validate_and_fix_path(args[param], check_dir=True)
for param in ('yaml', 'requirements', 'main'):
args[param] = __validate_and_fix_path(args[param], check_file=True)
if args['extra_data']:
for i in range(len(args['extra_data'])):
args['extra_data'][i] = __validate_and_fix_path(args['extra_data'][i],
check_file=True, check_dir=True)
# Check if python package
if not os.path.isfile(os.path.join(args['package'], '__init__.py')):
raise SystemExit('Could not confirm `%s` is a python package. Make sure it contains an '
'`__init__.py`.')
# Check/fix spark args
if args['spark_args']:
args['spark_args'] = __validate_and_fix_spark_args(args['spark_args'])
return args
def __validate_and_fix_path(path, check_file=False, check_dir=False):
"""Check if a file/directory exists and converts relative paths to absolute ones"""
# pylint: disable=superfluous-parens
if path is None:
return path
else:
if not (os.path.isfile(path) if check_file else False) \
and not (os.path.isdir(path) if check_dir else False):
raise SystemExit('Path `%s` does not exist' % path)
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(os.path.abspath(os.curdir), path))
return path
def __validate_and_fix_spark_args(spark_args):
"""
Prepares spark arguments. In the command-line script, they are passed as for example
`-s master=local[4] deploy-mode=client verbose`, which would be passed to spark-submit as
`--master local[4] --deploy-mode client --verbose`
Parameters
----------
spark_args (List): List of spark arguments
Returns
-------
fixed_args (List): List of fixed and validated spark arguments
"""
pattern = re.compile(r'[\w\-_]+=.+')
fixed_args = []
for arg in spark_args:
if arg not in SPARK_SUBMIT_FLAGS:
if not pattern.match(arg):
raise SystemExit('Spark argument `%s` does not seem to be in the correct format '
'`ARG_NAME=ARG_VAL`, and is also not recognized to be one of the'
'valid spark-submit flags (%s).' % (arg, str(SPARK_SUBMIT_FLAGS)))
eq_pos = arg.find('=')
fixed_args.append('--' + arg[:eq_pos])
fixed_args.append(arg[eq_pos + 1:])
else:
fixed_args.append('--' + arg)
return fixed_args
def __make_tmp_dir():
"""
Create a temporary directory where the packaged files will be located
Returns
-------
tmp_dir (str): Absolute path to temporary directory
"""
tmp_dir = tempfile.mkdtemp()
logging.debug('Created temporary dir: `%s`', tmp_dir)
return tmp_dir
def __package_dependencies(dist_dir, additional_reqs, silent):
"""
Installs the app's dependencies from pip and packages them (as zip), to be submitted to spark.
Parameters
----------
dist_dir (str): Path to directory where the packaged libs shall be located
additional_reqs (str): Path to a requirements.txt, containing any of the app's additional
requirements
silent (bool): Flag indicating whether pip output should be printed to console
"""
logging.info('Packaging dependencies')
libs_dir = os.path.join(dist_dir, 'libs')
if not os.path.isdir(libs_dir):
os.mkdir(libs_dir)
# Get requirements
req_txt = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements-submit.txt')
with open(req_txt, 'r') as req:
requirements = req.read().splitlines()
if additional_reqs:
with open(additional_reqs, 'r') as req:
for row in req:
requirements.append(row)
# Remove duplicates
requirements = list(set(requirements))
# Install
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
for pkg in requirements:
cmd = ['pip', 'install', pkg, '-t', libs_dir]
logging.debug('Calling `%s`', str(cmd))
call(cmd, **outp)
devnull.close()
# Package
shutil.make_archive(libs_dir, 'zip', libs_dir, './')
def __package_app(tasks_pkg, dist_dir, custom_main=None, extra_data=None):
    """
    Packages the `tasks_pkg` (as zip) to `dist_dir`. Also copies the 'main' python file to
    `dist_dir`, to be submitted to spark. Same for `extra_data`.

    Parameters
    ----------
    tasks_pkg (str): Path to the python package containing tasks
    dist_dir (str): Path to the directory where the packaged code should be stored
    custom_main (str): Path to a custom 'main' python file.
    extra_data (List[str]): List containing paths to files/directories that should also be packaged
                            and submitted to spark
    """
    logging.info('Packaging application')

    # Package tasks
    pkg_parent, pkg_name = os.path.split(os.path.realpath(tasks_pkg))
    shutil.make_archive(os.path.join(dist_dir, 'tasks'), 'zip', pkg_parent, pkg_name)

    # Resolve the main file, then copy it once (the original duplicated the copy
    # in both branches).
    if custom_main is None:
        from . import _main
        main_path = _main.__file__
        if main_path.endswith('pyc'):
            # Point at the .py source rather than the compiled .pyc
            main_path = main_path[:-1]
    else:
        main_path = custom_main
    shutil.copy(os.path.realpath(main_path), os.path.join(dist_dir, 'main.py'))

    # Package _framework
    shutil.make_archive(os.path.join(dist_dir, '_framework'),
                        'zip',
                        os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'),
                        './sparklanes/')

    # Package extra data
    if extra_data:
        for dat in extra_data:
            real_path = os.path.realpath(dat)
            target = os.path.join(dist_dir, os.path.split(real_path)[1])
            if os.path.isfile(real_path):
                shutil.copy(real_path, target)
            elif os.path.isdir(real_path):
                shutil.copytree(real_path, target)
            else:
                raise IOError('File `%s` not found at `%s`.' % (dat, real_path))
def __clean_up(dist_dir):
"""Delete packaged app"""
shutil.rmtree(dist_dir)
|
def make_default_logger(name=INTERNAL_LOGGER_NAME, level=logging.INFO,
                        fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
    """Return a logger configured with the framework defaults.

    A stderr stream handler is attached only when the logger has no handlers
    yet, so repeated calls do not duplicate output.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if not logger.handlers:
        stream_handler = logging.StreamHandler(sys.stderr)
        stream_handler.setLevel(level)
        stream_handler.setFormatter(logging.Formatter(fmt))
        logger.addHandler(stream_handler)
    return logger
import logging
import sys
from .env import INTERNAL_LOGGER_NAME
|
ksbg/sparklanes | sparklanes/_framework/validation.py | validate_schema | python | def validate_schema(yaml_def, branch=False):
schema = Schema({
'lane' if not branch else 'branch': {
Optional('name'): str,
Optional('run_parallel'): bool,
'tasks': list
}
})
schema.validate(yaml_def)
from schema import And, Use
task_schema = Schema({
'class': str,
Optional('kwargs'): Or({str: object}),
Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))
})
def validate_tasks(tasks): # pylint: disable=missing-docstring
for task in tasks:
try:
Schema({'branch': dict}).validate(task)
validate_schema(task, True)
except SchemaError:
task_schema.validate(task)
return True
return validate_tasks(yaml_def['lane']['tasks'] if not branch else yaml_def['branch']['tasks']) | Validates the schema of a dict
Parameters
----------
yaml_def : dict
dict whose schema shall be validated
branch : bool
Indicates whether `yaml_def` is a dict of a top-level lane, or of a branch
inside a lane (needed for recursion)
Returns
-------
bool
True if validation was successful | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/validation.py#L10-L52 | [
"def validate_tasks(tasks): # pylint: disable=missing-docstring\n for task in tasks:\n try:\n Schema({'branch': dict}).validate(task)\n validate_schema(task, True)\n except SchemaError:\n task_schema.validate(task)\n\n return True\n"
] | """Contains helper functions, used for class and schema validation."""
import inspect
from schema import Schema, Optional, Or
from six import PY2, PY3
from .errors import TaskInitializationError, SchemaError
def validate_params(cls, mtd_name, *args, **kwargs):
    """Validates if the given args/kwargs match the method signature. Checks if:
    - at least all required args/kwargs are given
    - no redundant args/kwargs are given

    Parameters
    ----------
    cls : Class
    mtd_name : str
        Name of the method whose parameters shall be validated
    args: list
        Positional arguments
    kwargs : dict
        Dict of keyword arguments
    """
    mtd = getattr(cls, mtd_name)
    # `mtd_name` must refer to a method; the check differs per Python version,
    # since Python 3 exposes unbound methods as plain functions.
    py3_mtd_condition = (not (inspect.isfunction(mtd) or inspect.ismethod(mtd))
                         and hasattr(cls, mtd_name))
    py2_mtd_condition = (not inspect.ismethod(mtd)
                         and not isinstance(cls.__dict__[mtd_name], staticmethod))
    if (PY3 and py3_mtd_condition) or (PY2 and py2_mtd_condition):
        raise TypeError('Attribute `%s` of class `%s` must be a method. Got type `%s` instead.'
                        % (mtd_name, cls.__name__, type(mtd)))

    req_params, opt_params = arg_spec(cls, mtd_name)
    n_params = len(req_params) + len(opt_params)
    n_args_kwargs = len(args) + len(kwargs)

    # Reject kwargs that are not part of the signature at all
    for k in kwargs:
        if k not in req_params and k not in opt_params:
            raise TaskInitializationError('kwarg `%s` is not a parameter of callable `%s`.'
                                          % (k, mtd.__name__))
    if n_args_kwargs < len(req_params):
        raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))
    if len(args) > n_params or n_args_kwargs > n_params or len(kwargs) > n_params:
        raise TaskInitializationError('Too many args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))

    # kwargs that fill neither a remaining required parameter nor an optional one.
    # Bug fix: the original message read "kwargs that in the signature" — the
    # words "are not" were missing.
    redundant_p = [p for p in kwargs if p not in req_params[len(args):] + opt_params]
    if redundant_p:
        raise TaskInitializationError('Supplied one or more kwargs that are not in the '
                                      'signature of callable `%s`. Redundant kwargs: %s'
                                      % (mtd.__name__, str(redundant_p)))

    # Every required parameter not covered positionally must appear in kwargs
    needed_kwargs = req_params[len(args):]
    if not all(p in kwargs for p in needed_kwargs):
        raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))
def arg_spec(cls, mtd_name):
    """Cross-version argument signature inspection

    Parameters
    ----------
    cls : class
    mtd_name : str
        Name of the method to be inspected

    Returns
    -------
    required_params : list of str
        List of required, positional parameters (excluding `self`/`cls`)
    optional_params : list of str
        List of optional parameters, i.e. parameters with a default value
    """
    mtd = getattr(cls, mtd_name)
    required_params = []
    optional_params = []
    if hasattr(inspect, 'signature'):  # Python 3
        params = inspect.signature(mtd).parameters  # pylint: disable=no-member
        for name, param in params.items():
            if param.default is inspect.Parameter.empty:  # pylint: disable=no-member
                # Python 3 does not make a difference between unbound methods and
                # functions, so the only way to distinguish if the first argument
                # is of a regular method, or a class method, is to look for the
                # conventional argument name. Yikes.
                if name not in ('self', 'cls'):
                    required_params.append(name)
            else:
                optional_params.append(name)
    else:  # Python 2
        params = inspect.getargspec(mtd)  # pylint: disable=deprecated-method
        num = len(params[0]) if params[0] else 0
        n_opt = len(params[3]) if params[3] else 0
        n_req = (num - n_opt) if n_opt <= num else 0
        required_params = list(params[0][:n_req])
        optional_params = list(params[0][n_req:num])
        # getargspec includes `self`/`cls` in the arg list; strip it for bound
        # (non-static) methods. Bug fix: the original performed this deletion
        # unconditionally, so on Python 3 — where `self`/`cls` was already
        # filtered out by name above — the first *real* required parameter was
        # dropped as well.
        if inspect.isroutine(getattr(cls, mtd_name)):
            bound_mtd = cls.__dict__[mtd_name]
            if not isinstance(bound_mtd, staticmethod):
                del required_params[0]
    return required_params, optional_params
|
ksbg/sparklanes | sparklanes/_framework/validation.py | validate_params | python | def validate_params(cls, mtd_name, *args, **kwargs):
mtd = getattr(cls, mtd_name)
py3_mtd_condition = (not (inspect.isfunction(mtd) or inspect.ismethod(mtd))
and hasattr(cls, mtd_name))
py2_mtd_condition = (not inspect.ismethod(mtd)
and not isinstance(cls.__dict__[mtd_name], staticmethod))
if (PY3 and py3_mtd_condition) or (PY2 and py2_mtd_condition):
raise TypeError('Attribute `%s` of class `%s` must be a method. Got type `%s` instead.'
% (mtd_name, cls.__name__, type(mtd)))
req_params, opt_params = arg_spec(cls, mtd_name)
n_params = len(req_params) + len(opt_params)
n_args_kwargs = len(args) + len(kwargs)
for k in kwargs:
if k not in req_params and k not in opt_params:
raise TaskInitializationError('kwarg `%s` is not a parameter of callable `%s`.'
% (k, mtd.__name__))
if n_args_kwargs < len(req_params):
raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
'Required args: %s' % (mtd.__name__, str(req_params)))
if len(args) > n_params or n_args_kwargs > n_params or len(kwargs) > n_params:
raise TaskInitializationError('Too many args/kwargs supplied for callable `%s`. '
'Required args: %s' % (mtd.__name__, str(req_params)))
redundant_p = [p for p in kwargs if p not in req_params[len(args):] + opt_params]
if redundant_p:
raise TaskInitializationError('Supplied one or more kwargs that in the signature of '
'callable `%s`. Redundant kwargs: %s'
% (mtd.__name__, str(redundant_p)))
needed_kwargs = req_params[len(args):]
if not all([True if p in kwargs else False for p in needed_kwargs]):
raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
'Required args: %s' % (mtd.__name__, str(req_params))) | Validates if the given args/kwargs match the method signature. Checks if:
- at least all required args/kwargs are given
- no redundant args/kwargs are given
Parameters
----------
cls : Class
mtd_name : str
Name of the method whose parameters shall be validated
args: list
Positional arguments
kwargs : dict
Dict of keyword arguments | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/validation.py#L55-L105 | null | """Contains helper functions, used for class and schema validation."""
import inspect
from schema import Schema, Optional, Or
from six import PY2, PY3
from .errors import TaskInitializationError, SchemaError
def validate_schema(yaml_def, branch=False):
    """Validates the schema of a dict

    Parameters
    ----------
    yaml_def : dict
        dict whose schema shall be validated
    branch : bool
        Indicates whether `yaml_def` is a dict of a top-level lane, or of a branch
        inside a lane (needed for recursion)

    Returns
    -------
    bool
        True if validation was successful
    """
    # Top level: a single 'lane' (or 'branch' when recursing) key holding an
    # optional name, an optional run_parallel flag, and a mandatory task list.
    schema = Schema({
        'lane' if not branch else 'branch': {
            Optional('name'): str,
            Optional('run_parallel'): bool,
            'tasks': list
        }
    })
    schema.validate(yaml_def)
    # Imported here rather than at module level; presumably to keep the
    # module-level namespace minimal — TODO confirm there is no other reason.
    from schema import And, Use
    task_schema = Schema({
        'class': str,
        Optional('kwargs'): Or({str: object}),
        # NOTE(review): the second Or() alternative applies Use(isinstance-check)
        # and then validates against the literal False — it appears intended to
        # reject dicts for 'args' while allowing lists; confirm against the
        # `schema` library's And/Use semantics.
        Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))
    })

    def validate_tasks(tasks):  # pylint: disable=missing-docstring
        # Each entry is either a nested branch (validated recursively) or a
        # plain task dict (validated against task_schema).
        for task in tasks:
            try:
                Schema({'branch': dict}).validate(task)
                validate_schema(task, True)
            except SchemaError:
                task_schema.validate(task)

        return True

    return validate_tasks(yaml_def['lane']['tasks'] if not branch else yaml_def['branch']['tasks'])
def arg_spec(cls, mtd_name):
"""Cross-version argument signature inspection
Parameters
----------
cls : class
mtd_name : str
Name of the method to be inspected
Returns
-------
required_params : list of str
List of required, positional parameters
optional_params : list of str
List of optional parameters, i.e. parameters with a default value
"""
mtd = getattr(cls, mtd_name)
required_params = []
optional_params = []
if hasattr(inspect, 'signature'): # Python 3
params = inspect.signature(mtd).parameters # pylint: disable=no-member
for k in params.keys():
if params[k].default == inspect.Parameter.empty: # pylint: disable=no-member
# Python 3 does not make a difference between unbound methods and functions, so the
# only way to distinguish if the first argument is of a regular method, or a class
# method, is to look for the conventional argument name. Yikes.
if not (params[k].name == 'self' or params[k].name == 'cls'):
required_params.append(k)
else:
optional_params.append(k)
else: # Python 2
params = inspect.getargspec(mtd) # pylint: disable=deprecated-method
num = len(params[0]) if params[0] else 0
n_opt = len(params[3]) if params[3] else 0
n_req = (num - n_opt) if n_opt <= num else 0
for i in range(0, n_req):
required_params.append(params[0][i])
for i in range(n_req, num):
optional_params.append(params[0][i])
if inspect.isroutine(getattr(cls, mtd_name)):
bound_mtd = cls.__dict__[mtd_name]
if not isinstance(bound_mtd, staticmethod):
del required_params[0]
return required_params, optional_params
|
ksbg/sparklanes | sparklanes/_framework/validation.py | arg_spec | python | def arg_spec(cls, mtd_name):
mtd = getattr(cls, mtd_name)
required_params = []
optional_params = []
if hasattr(inspect, 'signature'): # Python 3
params = inspect.signature(mtd).parameters # pylint: disable=no-member
for k in params.keys():
if params[k].default == inspect.Parameter.empty: # pylint: disable=no-member
# Python 3 does not make a difference between unbound methods and functions, so the
# only way to distinguish if the first argument is of a regular method, or a class
# method, is to look for the conventional argument name. Yikes.
if not (params[k].name == 'self' or params[k].name == 'cls'):
required_params.append(k)
else:
optional_params.append(k)
else: # Python 2
params = inspect.getargspec(mtd) # pylint: disable=deprecated-method
num = len(params[0]) if params[0] else 0
n_opt = len(params[3]) if params[3] else 0
n_req = (num - n_opt) if n_opt <= num else 0
for i in range(0, n_req):
required_params.append(params[0][i])
for i in range(n_req, num):
optional_params.append(params[0][i])
if inspect.isroutine(getattr(cls, mtd_name)):
bound_mtd = cls.__dict__[mtd_name]
if not isinstance(bound_mtd, staticmethod):
del required_params[0]
return required_params, optional_params | Cross-version argument signature inspection
Parameters
----------
cls : class
mtd_name : str
Name of the method to be inspected
Returns
-------
required_params : list of str
List of required, positional parameters
optional_params : list of str
List of optional parameters, i.e. parameters with a default value | train | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_framework/validation.py#L108-L156 | null | """Contains helper functions, used for class and schema validation."""
import inspect
from schema import Schema, Optional, Or
from six import PY2, PY3
from .errors import TaskInitializationError, SchemaError
def validate_schema(yaml_def, branch=False):
"""Validates the schema of a dict
Parameters
----------
yaml_def : dict
dict whose schema shall be validated
branch : bool
Indicates whether `yaml_def` is a dict of a top-level lane, or of a branch
inside a lane (needed for recursion)
Returns
-------
bool
True if validation was successful
"""
schema = Schema({
'lane' if not branch else 'branch': {
Optional('name'): str,
Optional('run_parallel'): bool,
'tasks': list
}
})
schema.validate(yaml_def)
from schema import And, Use
task_schema = Schema({
'class': str,
Optional('kwargs'): Or({str: object}),
Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))
})
def validate_tasks(tasks): # pylint: disable=missing-docstring
for task in tasks:
try:
Schema({'branch': dict}).validate(task)
validate_schema(task, True)
except SchemaError:
task_schema.validate(task)
return True
return validate_tasks(yaml_def['lane']['tasks'] if not branch else yaml_def['branch']['tasks'])
def validate_params(cls, mtd_name, *args, **kwargs):
    """Validate that the given args/kwargs match a method's signature.

    Checks that:
    - the attribute `mtd_name` of `cls` really is a method
    - at least all required parameters are covered by args/kwargs
    - no redundant or unknown args/kwargs are given

    Parameters
    ----------
    cls : class
        Class on which the method is defined
    mtd_name : str
        Name of the method whose parameters shall be validated
    args : tuple
        Positional arguments
    kwargs : dict
        Keyword arguments

    Raises
    ------
    TypeError
        If `mtd_name` does not name a method on `cls`
    TaskInitializationError
        If the supplied args/kwargs do not satisfy the method's signature
    """
    mtd = getattr(cls, mtd_name)
    # Method detection differs between interpreters: in PY3 an unbound method
    # is a plain function; in PY2 it is an `instancemethod` object.
    py3_mtd_condition = (not (inspect.isfunction(mtd) or inspect.ismethod(mtd))
                         and hasattr(cls, mtd_name))
    py2_mtd_condition = (not inspect.ismethod(mtd)
                         and not isinstance(cls.__dict__[mtd_name], staticmethod))
    if (PY3 and py3_mtd_condition) or (PY2 and py2_mtd_condition):
        raise TypeError('Attribute `%s` of class `%s` must be a method. Got type `%s` instead.'
                        % (mtd_name, cls.__name__, type(mtd)))
    req_params, opt_params = arg_spec(cls, mtd_name)
    n_params = len(req_params) + len(opt_params)
    n_args_kwargs = len(args) + len(kwargs)
    # Reject kwargs that do not appear anywhere in the signature.
    for k in kwargs:
        if k not in req_params and k not in opt_params:
            raise TaskInitializationError('kwarg `%s` is not a parameter of callable `%s`.'
                                          % (k, mtd.__name__))
    if n_args_kwargs < len(req_params):
        raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))
    # More args+kwargs than parameters can never be valid. This single check
    # subsumes the original's separate len(args)/len(kwargs) comparisons,
    # since each of those counts is bounded by the combined total.
    if n_args_kwargs > n_params:
        raise TaskInitializationError('Too many args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))
    # Kwargs must not target required parameters already filled positionally.
    redundant_p = [p for p in kwargs if p not in req_params[len(args):] + opt_params]
    if redundant_p:
        raise TaskInitializationError('Supplied one or more kwargs that are not in the '
                                      'signature of callable `%s`. Redundant kwargs: %s'
                                      % (mtd.__name__, str(redundant_p)))
    # Every required parameter not covered positionally must appear in kwargs.
    needed_kwargs = req_params[len(args):]
    if not all(p in kwargs for p in needed_kwargs):
        raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))
|
def _run(self, url_path, headers=None, **kwargs):
    """
    Send a request to the given v2 API endpoint path.

    The caller's kwargs become the POST payload; the account's
    ``api_token`` is always injected into it before sending.
    """
    url = self._construct_url(url_path)
    payload = kwargs
    payload.update({'api_token': self.api_token})
    return self._make_request(url, payload, headers)
"def _construct_url(self, path):\n return '{}{}'.format(self.HOST, path)\n",
"def _make_request(self, url, payload, headers=None):\n kwargs = {}\n if payload.get('file'):\n kwargs['files'] = {'file': payload.pop('file')}\n response = requests.post(url, data=payload, headers=headers, **kwargs)\n\n if response.status_code != 200:\n raise POEditorException(\n status='fail',\n error_code=response.status_code,\n message=response.reason\n )\n\n data = response.json()\n\n if 'response' not in data:\n raise POEditorException(\n status='fail',\n error_code=-1,\n message='\"response\" key is not present'\n )\n\n if 'status' in data['response'] and \\\n data['response']['status'] != self.SUCCESS_CODE:\n raise POEditorException(\n error_code=data['response'].get('code'),\n status=data['response']['status'],\n message=data['response'].get('message')\n )\n\n return data\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
    Please refer to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
    def __init__(self, api_token):
        """
        All requests to the API must contain the parameter api_token.
        You'll find it in My Account > API Access in your POEditor account.
        """
        # Token string that `_run` injects into every request payload.
        self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
    def _make_request(self, url, payload, headers=None):
        """
        POST `payload` to `url` and return the decoded JSON body.

        Raises POEditorException for transport-level failures, a malformed
        body, or an API-level non-success status.
        """
        kwargs = {}
        if payload.get('file'):
            # A file handle is sent as a multipart upload, not as form data.
            kwargs['files'] = {'file': payload.pop('file')}
        response = requests.post(url, data=payload, headers=headers, **kwargs)
        # HTTP-level failure.
        if response.status_code != 200:
            raise POEditorException(
                status='fail',
                error_code=response.status_code,
                message=response.reason
            )
        data = response.json()
        # The v2 API always wraps its result in a 'response' envelope.
        if 'response' not in data:
            raise POEditorException(
                status='fail',
                error_code=-1,
                message='"response" key is not present'
            )
        # API-level failure reported inside the envelope.
        if 'status' in data['response'] and \
                data['response']['status'] != self.SUCCESS_CODE:
            raise POEditorException(
                error_code=data['response'].get('code'),
                status=data['response']['status'],
                message=data['response'].get('message')
            )
        return data
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
    def _upload(self, project_id, updating, file_path, language_code=None,
                overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
        """
        Internal: updates terms / translations
        File uploads are limited to one every 30 seconds
        """
        # `updating` must be one of the three supported modes.
        options = [
            self.UPDATING_TERMS,
            self.UPDATING_TERMS_TRANSLATIONS,
            self.UPDATING_TRANSLATIONS
        ]
        if updating not in options:
            raise POEditorArgsException(
                'Updating arg must be in {}'.format(options)
            )
        # A language is mandatory whenever translations are involved.
        options = [
            self.UPDATING_TERMS_TRANSLATIONS,
            self.UPDATING_TRANSLATIONS
        ]
        if language_code is None and updating in options:
            raise POEditorArgsException(
                'Language code is required only if updating is '
                'terms_translations or translations)'
            )
        # tags/sync_terms are meaningless for a translations-only upload.
        if updating == self.UPDATING_TRANSLATIONS:
            tags = None
            sync_terms = None
        # Special content type: the upload endpoint expects string form
        # fields, so booleans are coerced to '0'/'1' and None to ''.
        tags = tags or ''
        language_code = language_code or ''
        sync_terms = '1' if sync_terms else '0'
        overwrite = '1' if overwrite else '0'
        fuzzy_trigger = '1' if fuzzy_trigger else '0'
        project_id = str(project_id)
        # The open handle is passed through; _make_request turns it into a
        # multipart 'file' part.
        with open(file_path, 'r+b') as local_file:
            data = self._run(
                url_path="projects/upload",
                id=project_id,
                language=language_code,
                file=local_file,
                updating=updating,
                tags=tags,
                sync_terms=sync_terms,
                overwrite=overwrite,
                fuzzy_trigger=fuzzy_trigger
            )
        return data['result']
    def update_terms(self, project_id, file_path=None, language_code=None,
                     overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
        """
        Updates terms
        overwrite: set it to True if you want to overwrite translations
        sync_terms: set it to True if you want to sync your terms (terms that
            are not found in the uploaded file will be deleted from project
            and the new ones added). Ignored if updating = translations
        tags: Add tags to the project terms; available when updating terms or terms_translations;
            you can use the following keys: "all" - for the all the imported terms, "new" - for
            the terms which aren't already in the project, "obsolete" - for the terms which are
            in the project but not in the imported file and "overwritten_translations" - for the
            terms for which translations change
        fuzzy_trigger: set it to True to mark corresponding translations from the
            other languages as fuzzy for the updated values
        """
        # NOTE(review): another method named `update_terms` (the JSON-based
        # terms/update variant) is defined earlier in this class; because this
        # file-upload variant appears later in the class body, it shadows the
        # earlier one, making the JSON variant unreachable. Renaming either
        # would change the public interface, so this is flagged rather than
        # fixed — confirm which behavior callers depend on.
        return self._upload(
            project_id=project_id,
            updating=self.UPDATING_TERMS,
            file_path=file_path,
            language_code=language_code,
            overwrite=overwrite,
            sync_terms=sync_terms,
            tags=tags,
            fuzzy_trigger=fuzzy_trigger
        )
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
def _apiv1_run(self, action, headers=None, **kwargs):
    """
    Kept for backwards compatibility of this client.
    See "self.clear_reference_language".
    """
    warnings.warn(
        "POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
        DeprecationWarning, stacklevel=2
    )
    url = "https://poeditor.com/api/"
    payload = kwargs
    payload.update({'action': action, 'api_token': self.api_token})
    return self._make_request(url, payload, headers)
See "self.clear_reference_language" | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L140-L154 | [
"def _make_request(self, url, payload, headers=None):\n kwargs = {}\n if payload.get('file'):\n kwargs['files'] = {'file': payload.pop('file')}\n response = requests.post(url, data=payload, headers=headers, **kwargs)\n\n if response.status_code != 200:\n raise POEditorException(\n status='fail',\n error_code=response.status_code,\n message=response.reason\n )\n\n data = response.json()\n\n if 'response' not in data:\n raise POEditorException(\n status='fail',\n error_code=-1,\n message='\"response\" key is not present'\n )\n\n if 'status' in data['response'] and \\\n data['response']['status'] != self.SUCCESS_CODE:\n raise POEditorException(\n error_code=data['response'].get('code'),\n status=data['response']['status'],\n message=data['response'].get('message')\n )\n\n return data\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
            overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Internal: updates terms / translations

    File uploads are limited to one every 30 seconds
    """
    # `updating` selects the API upload mode; reject anything unknown.
    options = [
        self.UPDATING_TERMS,
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if updating not in options:
        raise POEditorArgsException(
            'Updating arg must be in {}'.format(options)
        )
    # A language is mandatory whenever translations are involved.
    options = [
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if language_code is None and updating in options:
        raise POEditorArgsException(
            'Language code is required only if updating is '
            'terms_translations or translations)'
        )
    # tags / sync_terms are ignored by the API for a translations-only
    # upload, so drop them before serializing.
    if updating == self.UPDATING_TRANSLATIONS:
        tags = None
        sync_terms = None
    # Special content type:
    # The API expects flat form-encoded strings, with '0'/'1' flags.
    tags = tags or ''
    language_code = language_code or ''
    sync_terms = '1' if sync_terms else '0'
    overwrite = '1' if overwrite else '0'
    fuzzy_trigger = '1' if fuzzy_trigger else '0'
    project_id = str(project_id)
    # The open handle is sent as multipart form-data (see _make_request).
    with open(file_path, 'r+b') as local_file:
        data = self._run(
            url_path="projects/upload",
            id=project_id,
            language=language_code,
            file=local_file,
            updating=updating,
            tags=tags,
            sync_terms=sync_terms,
            overwrite=overwrite,
            fuzzy_trigger=fuzzy_trigger
        )
    return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
                 overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Updates terms

    NOTE(review): this file-upload variant shares its name with the
    earlier ``update_terms(project_id, data, ...)`` definition in this
    class; being defined later, it shadows that method at class-creation
    time — confirm which signature callers rely on.

    overwrite: set it to True if you want to overwrite translations
    sync_terms: set it to True if you want to sync your terms (terms that
        are not found in the uploaded file will be deleted from project
        and the new ones added). Ignored if updating = translations
    tags: Add tags to the project terms; available when updating terms or terms_translations;
        you can use the following keys: "all" - for the all the imported terms, "new" - for
        the terms which aren't already in the project, "obsolete" - for the terms which are
        in the project but not in the imported file and "overwritten_translations" - for the
        terms for which translations change
    fuzzy_trigger: set it to True to mark corresponding translations from the
        other languages as fuzzy for the updated values
    """
    # Delegates all validation and flag coercion to _upload.
    return self._upload(
        project_id=project_id,
        updating=self.UPDATING_TERMS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger
    )
def update_terms_definitions(self, project_id, file_path=None,
                             language_code=None, overwrite=False,
                             sync_terms=False, tags=None, fuzzy_trigger=None):
    """Deprecated alias; forwards to :meth:`update_terms_translations`."""
    warnings.warn(
        "This method has been renamed update_terms_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_terms_translations(
        project_id, file_path, language_code,
        overwrite, sync_terms, tags, fuzzy_trigger
    )
def update_terms_translations(self, project_id, file_path=None,
                              language_code=None, overwrite=False,
                              sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Upload a file that updates both terms and their translations.

    overwrite: True to overwrite existing translations.
    sync_terms: True to sync terms (terms missing from the uploaded file
        are deleted from the project, new ones are added).
    tags: tag the imported terms; accepted keys are "all", "new",
        "obsolete" and "overwritten_translations".
    fuzzy_trigger: True to mark the corresponding translations in the
        other languages as fuzzy for the updated values.
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TERMS_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
def update_definitions(self, project_id, file_path=None,
                       language_code=None, overwrite=False, fuzzy_trigger=None):
    """Deprecated alias; forwards to :meth:`update_translations`."""
    warnings.warn(
        "This method has been renamed update_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_translations(
        project_id, file_path, language_code, overwrite, fuzzy_trigger
    )
def update_translations(self, project_id, file_path=None,
                        language_code=None, overwrite=False, fuzzy_trigger=None):
    """
    Upload a file that updates translations only.

    overwrite: True to overwrite existing definitions.
    fuzzy_trigger: True to mark the corresponding translations in the
        other languages as fuzzy for the updated values.
    """
    return self._upload(
        project_id,
        self.UPDATING_TRANSLATIONS,
        file_path,
        language_code=language_code,
        overwrite=overwrite,
        fuzzy_trigger=fuzzy_trigger,
    )
def available_languages(self):
    """
    Return every language POEditor supports.

    The same list is published at https://poeditor.com/docs/languages.
    """
    response = self._run(url_path="languages/available")
    return response['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
    """Return the contributors, optionally scoped to a project/language."""
    response = self._run(
        url_path="contributors/list",
        id=project_id,
        language=language_code,
    )
    return response['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
    """Grant *email* contributor access to one project language."""
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        language=language_code,
    )
    # API errors raise in _run; reaching here means success.
    return True
def add_administrator(self, project_id, name, email):
    """
    Adds an administrator to a project.

    (The previous docstring wrongly said "contributor to a project
    language": this call sends admin=True and takes no language.)
    """
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        admin=True
    )
    # API errors raise in _run; reaching here means success.
    return True
def remove_contributor(self, project_id, email, language):
    """Revoke *email*'s contributor access for one project language."""
    self._run(
        url_path="contributors/remove",
        id=project_id,
        email=email,
        language=language,
    )
    # API errors raise in _run; reaching here means success.
    return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI._project_formatter | python | def _project_formatter(self, data):
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output | Project object | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L156-L176 | [
"def parse_datetime(dt_string):\n return datetime.strptime(dt_string, '%Y-%m-%dT%H:%M:%S%z')\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
    """
    All requests to the API must contain the parameter api_token.
    You'll find it in My Account > API Access in your POEditor account.
    """
    # Stored once; _run / _apiv1_run attach it to every request payload.
    self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
    """
    POST *payload* to *url* and return the decoded JSON document.

    Raises POEditorException for HTTP failures, for responses missing
    the "response" envelope, and for API-level non-success statuses.
    """
    # A file handle in the payload must go out as multipart/form-data,
    # so move it from the form fields into requests' `files=` argument.
    kwargs = {}
    if payload.get('file'):
        kwargs['files'] = {'file': payload.pop('file')}
    response = requests.post(url, data=payload, headers=headers, **kwargs)
    # Transport-level failure (non-200 HTTP status).
    if response.status_code != 200:
        raise POEditorException(
            status='fail',
            error_code=response.status_code,
            message=response.reason
        )
    data = response.json()
    # The API always wraps its answer in a "response" envelope.
    if 'response' not in data:
        raise POEditorException(
            status='fail',
            error_code=-1,
            message='"response" key is not present'
        )
    # Application-level failure: envelope present but status != "success".
    if 'status' in data['response'] and \
            data['response']['status'] != self.SUCCESS_CODE:
        raise POEditorException(
            error_code=data['response'].get('code'),
            status=data['response']['status'],
            message=data['response'].get('message')
        )
    return data
def _run(self, url_path, headers=None, **kwargs):
    """Dispatch an API v2 call to *url_path*, attaching the api_token."""
    payload = dict(kwargs, api_token=self.api_token)
    return self._make_request(self._construct_url(url_path), payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
    """
    Kept for backwards compatibility of this client

    See "self.clear_reference_language"
    """
    # Warn on every call: v1 exists here only for endpoints with no
    # working v2 equivalent.
    warnings.warn(
        "POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
        DeprecationWarning, stacklevel=2
    )
    # v1 uses a different host and an "action" form field rather than
    # per-endpoint URL paths.
    url = "https://poeditor.com/api/"
    payload = kwargs
    payload.update({'action': action, 'api_token': self.api_token})
    return self._make_request(url, payload, headers)
def list_projects(self):
    """Return the projects owned by the account, as formatted dicts."""
    response = self._run(url_path="projects/list")
    raw_projects = response['result'].get('projects', [])
    return [self._project_formatter(project) for project in raw_projects]
def create_project(self, name, description=None):
    """Create a new project and return its numeric id."""
    response = self._run(
        url_path="projects/add",
        name=name,
        description=description or '',
    )
    return response['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
                   reference_language=None):
    """
    Update project settings (name, description, reference language).

    Fields left as None are not modified. Returns the project id.
    """
    changes = {}
    for field, value in (('name', name),
                         ('description', description),
                         ('reference_language', reference_language)):
        if value is not None:
            changes[field] = value
    response = self._run(
        url_path="projects/update",
        id=project_id,
        **changes
    )
    return response['result']['project']['id']
def delete_project(self, project_id):
    """Delete the project (you must be its owner). Returns True."""
    self._run(url_path="projects/delete", id=project_id)
    # API errors raise in _run; reaching here means success.
    return True
def view_project_details(self, project_id):
    """Return one project's details as a formatted dict."""
    response = self._run(url_path="projects/view", id=project_id)
    return self._project_formatter(response['result']['project'])
def list_project_languages(self, project_id):
    """
    Return the project's languages, each with its completion percentage
    and the UTC datetime (ISO 8601) of the last change.
    """
    response = self._run(url_path="languages/list", id=project_id)
    return response['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
    """Add *language_code* to the project. Returns True."""
    self._run(
        url_path="languages/add",
        id=project_id,
        language=language_code,
    )
    # API errors raise in _run; reaching here means success.
    return True
def delete_language_from_project(self, project_id, language_code):
    """Remove *language_code* from the project. Returns True."""
    self._run(
        url_path="languages/delete",
        id=project_id,
        language=language_code,
    )
    # API errors raise in _run; reaching here means success.
    return True
def set_reference_language(self, project_id, language_code):
    """Set the project's reference language (thin update_project wrapper)."""
    return self.update_project(project_id,
                               reference_language=language_code)
def clear_reference_language(self, project_id):
    """
    Clears reference language from project

    Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
    Calling v2 projects/update with reference_language='' or reference_language=None did not work.
    https://poeditor.com/docs/api#projects_update
    """
    # Deliberately uses the deprecated v1 endpoint (see _apiv1_run),
    # which will emit a DeprecationWarning.
    self._apiv1_run(
        action="clear_reference_language",
        id=project_id
    )
    # API errors raise during the request; reaching here means success.
    return True
def view_project_terms(self, project_id, language_code=None):
    """
    Return the project's terms; translations are included when a
    language code is supplied.
    """
    response = self._run(
        url_path="terms/list",
        id=project_id,
        language=language_code,
    )
    return response['result'].get('terms', [])
def add_terms(self, project_id, data):
    """
    Adds terms to project.

    >>> data = [
        {
            "term": "Add new list",
            "context": "",
            "reference": "\/projects",
            "plural": "",
            "comment": ""
        },
        {
            "term": "one project found",
            "context": "",
            "reference": "\/projects",
            "plural": "%d projects found",
            "comment": "Make sure you translate the plural forms",
            "tags": [
                "first_tag",
                "second_tag"
            ]
        },
        {
            "term": "Show all projects",
            "context": "",
            "reference": "\/projects",
            "plural": "",
            "tags": "just_a_tag"
        }
    ]
    """
    # The API expects the term objects JSON-encoded in one form field.
    data = self._run(
        url_path="terms/add",
        id=project_id,
        data=json.dumps(data)
    )
    return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
    """
    Updates project terms. Lets you change the text, context, reference, plural and tags.

    NOTE(review): the class later defines a second ``update_terms`` (the
    file-upload variant); being defined afterwards, it shadows this
    method at class-creation time — confirm which one callers expect.

    >>> data = [
        {
            "term": "Add new list",
            "context": "",
            "new_term": "Save list",
            "new_context": "",
            "reference": "\/projects",
            "plural": "",
            "comment": "",
            "tags": [
                "first_tag",
                "second_tag"
            ]
        },
        {
            "term": "Display list",
            "context": "",
            "new_term": "Show list",
            "new_context": ""
        }
    ]
    """
    # fuzzy_trigger is only forwarded when explicitly set.
    kwargs = {}
    if fuzzy_trigger is not None:
        kwargs['fuzzy_trigger'] = fuzzy_trigger
    data = self._run(
        url_path="terms/update",
        id=project_id,
        data=json.dumps(data),
        **kwargs
    )
    return data['result']['terms']
def delete_terms(self, project_id, data):
    """
    Delete the given terms (matched by term + context) from the project.

    >>> data = [
        {"term": "one project found", "context": ""},
        {"term": "Show all projects", "context": "form"}
    ]
    """
    response = self._run(
        url_path="terms/delete",
        id=project_id,
        data=json.dumps(data),
    )
    return response['result']['terms']
def add_comment(self, project_id, data):
    """
    Adds comments to existing terms.

    >>> data = [
        {
            "term": "Add new list",
            "context": "",
            "comment": "This is a button"
        },
        {
            "term": "one project found",
            "context": "",
            "comment": "Make sure you translate the plural forms"
        },
        {
            "term": "Show all projects",
            "context": "",
            "comment": "This is a button"
        }
    ]
    """
    # Comments are JSON-encoded into a single form field, like add_terms.
    data = self._run(
        url_path="terms/add_comment",
        id=project_id,
        data=json.dumps(data)
    )
    return data['result']['terms']
def sync_terms(self, project_id, data):
    """
    Syncs your project with the array you send (terms that are not found
    in the dict object will be deleted from project and the new ones
    added).

    Please use with caution. If wrong data is sent, existing terms and their
    translations might be irreversibly lost.

    >>> data = [
        {
            "term": "Add new list",
            "context": "",
            "reference": "\/projects",
            "plural": "",
            "comment": ""
        },
        {
            "term": "one project found",
            "context": "",
            "reference": "\/projects",
            "plural": "%d projects found",
            "comment": "Make sure you translate the plural forms",
            "tags": [
                "first_tag",
                "second_tag"
            ]
        },
        {
            "term": "Show all projects",
            "context": "",
            "reference": "\/projects",
            "plural": "",
            "tags": "just_a_tag"
        }
    ]
    """
    # Destructive endpoint: the server-side term list is replaced by
    # exactly what is sent here.
    data = self._run(
        url_path="projects/sync",
        id=project_id,
        data=json.dumps(data)
    )
    return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.list_projects | python | def list_projects(self):
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects] | Returns the list of projects owned by user. | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L178-L186 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
    """
    Normalize a raw API project payload into a plain dict.

    The API reports booleans as truthy values or the string '0';
    both '0' and falsy values map to False here.
    """
    raw_open = data['open']
    raw_public = data['public']
    output = {
        'created': parse_datetime(data['created']),
        'id': int(data['id']),
        'name': data['name'],
        'open': bool(raw_open) and raw_open != '0',
        'public': bool(raw_public) and raw_public != '0',
    }
    # The detail view returns more info than the list view
    # (see https://poeditor.com/docs/api#projects_view); copy those
    # optional fields through when present.
    for key in ('description', 'reference_language', 'terms'):
        if key in data:
            output[key] = data[key]
    return output
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.create_project | python | def create_project(self, name, description=None):
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id'] | creates a new project. Returns the id of the project (if successful) | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L188-L198 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.update_project | python | def update_project(self, project_id, name=None, description=None,
reference_language=None):
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id'] | Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated. | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L200-L219 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
    """
    Updates project terms. Lets you change the text, context, reference,
    plural and tags of existing terms.

    NOTE(review): a second method also named ``update_terms`` (file-upload
    based, with a different signature) is defined later in this class and
    silently shadows this definition at class-creation time, so attribute
    lookup on instances will reach the upload variant, never this one.
    Confirm which definition is intended and rename one of them.

    :param project_id: POEditor project id.
    :param data: list of dicts describing the term updates, e.g.
        ``[{"term": "Display list", "context": "", "new_term": "Show list",
        "new_context": ""}]``.
    :param fuzzy_trigger: when truthy, marks corresponding translations in
        the other languages as fuzzy; omitted from the request when None.
    :returns: the ``terms`` result reported by the API.
    """
    kwargs = {}
    # Only send fuzzy_trigger when explicitly provided, so the API-side
    # default applies otherwise.
    if fuzzy_trigger is not None:
        kwargs['fuzzy_trigger'] = fuzzy_trigger
    data = self._run(
        url_path="terms/update",
        id=project_id,
        data=json.dumps(data),
        **kwargs
    )
    return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
           tags=None, local_file=None):
    """
    Exports terms / translations for one project language and downloads the
    resulting file.

    :param project_id: POEditor project id.
    :param language_code: language to export.
    :param file_type: one of FILE_TYPES (defaults to 'po').
    :param filters: restrict the export; either a single value from
        FILTER_BY (e.g. ``'translated'``) or an iterable of such values.
    :param tags: filter results by tag name(s).
    :param local_file: path to write the download to; when None, a new
        temporary file with a matching suffix is created and kept on disk.
    :returns: tuple ``(file_url, local_file)`` — the API download URL
        (expires after ~10 minutes) and the local path written to.
    :raises POEditorArgsException: on an unknown file type or filter value.
    """
    if file_type not in self.FILE_TYPES:
        raise POEditorArgsException(
            'content_type: file format {}'.format(self.FILE_TYPES))
    # BUG FIX: the original chained `if ... elif ...` let a *valid* string
    # filter (e.g. 'translated') fall through to the iterable branch, where
    # set('translated') iterated the string character by character and the
    # difference against FILTER_BY wrongly raised. Branch on the type first.
    if filters:
        if isinstance(filters, str):
            if filters not in self.FILTER_BY:
                raise POEditorArgsException(
                    "filters - filter results by {}".format(self.FILTER_BY))
        elif set(filters).difference(set(self.FILTER_BY)):
            raise POEditorArgsException(
                "filters - filter results by {}".format(self.FILTER_BY))
    data = self._run(
        url_path="projects/export",
        id=project_id,
        language=language_code,
        type=file_type,
        filters=filters,
        tags=tags
    )
    # The link of the file (expires after 10 minutes).
    file_url = data['result']['url']
    # Download file content, streamed so large exports are not held in memory.
    res = requests.get(file_url, stream=True)
    if not local_file:
        tmp_file = tempfile.NamedTemporaryFile(
            delete=False, suffix='.{}'.format(file_type))
        tmp_file.close()
        local_file = tmp_file.name
    with open(local_file, 'w+b') as po_file:
        # Dedicated loop variable; the original reused (and clobbered) the
        # API response variable `data`.
        for chunk in res.iter_content(chunk_size=1024):
            po_file.write(chunk)
    return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
    """
    Adds an administrator to a project.

    Same ``contributors/add`` endpoint as add_contributor, but with
    ``admin=True`` and no language argument, which grants admin rights.
    (The previous docstring incorrectly said "contributor".)
    """
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        admin=True
    )
    return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.view_project_details | python | def view_project_details(self, project_id):
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project']) | Returns project's details. | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L232-L240 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n",
"def _project_formatter(self, data):\n \"\"\"\n Project object\n \"\"\"\n open_ = False if not data['open'] or data['open'] == '0' else True\n public = False if not data['public'] or data['public'] == '0' else True\n output = {\n 'created': parse_datetime(data['created']),\n 'id': int(data['id']),\n 'name': data['name'],\n 'open': open_,\n 'public': public,\n }\n\n # the detail view returns more info than the list view\n # see https://poeditor.com/docs/api#projects_view\n for key in ['description', 'reference_language', 'terms']:\n if key in data:\n output[key] = data[key]\n\n return output\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
           tags=None, local_file=None):
    """
    Exports terms / translations for one project language and downloads the
    resulting file.

    :param project_id: POEditor project id.
    :param language_code: language to export.
    :param file_type: one of FILE_TYPES (defaults to 'po').
    :param filters: restrict the export; either a single value from
        FILTER_BY (e.g. ``'translated'``) or an iterable of such values.
    :param tags: filter results by tag name(s).
    :param local_file: path to write the download to; when None, a new
        temporary file with a matching suffix is created and kept on disk.
    :returns: tuple ``(file_url, local_file)`` — the API download URL
        (expires after ~10 minutes) and the local path written to.
    :raises POEditorArgsException: on an unknown file type or filter value.
    """
    if file_type not in self.FILE_TYPES:
        raise POEditorArgsException(
            'content_type: file format {}'.format(self.FILE_TYPES))
    # BUG FIX: the original chained `if ... elif ...` let a *valid* string
    # filter (e.g. 'translated') fall through to the iterable branch, where
    # set('translated') iterated the string character by character and the
    # difference against FILTER_BY wrongly raised. Branch on the type first.
    if filters:
        if isinstance(filters, str):
            if filters not in self.FILTER_BY:
                raise POEditorArgsException(
                    "filters - filter results by {}".format(self.FILTER_BY))
        elif set(filters).difference(set(self.FILTER_BY)):
            raise POEditorArgsException(
                "filters - filter results by {}".format(self.FILTER_BY))
    data = self._run(
        url_path="projects/export",
        id=project_id,
        language=language_code,
        type=file_type,
        filters=filters,
        tags=tags
    )
    # The link of the file (expires after 10 minutes).
    file_url = data['result']['url']
    # Download file content, streamed so large exports are not held in memory.
    res = requests.get(file_url, stream=True)
    if not local_file:
        tmp_file = tempfile.NamedTemporaryFile(
            delete=False, suffix='.{}'.format(file_type))
        tmp_file.close()
        local_file = tmp_file.name
    with open(local_file, 'w+b') as po_file:
        # Dedicated loop variable; the original reused (and clobbered) the
        # API response variable `data`.
        for chunk in res.iter_content(chunk_size=1024):
            po_file.write(chunk)
    return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
    """
    Adds an administrator to a project.

    Same ``contributors/add`` endpoint as add_contributor, but with
    ``admin=True`` and no language argument, which grants admin rights.
    (The previous docstring incorrectly said "contributor".)
    """
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        admin=True
    )
    return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.list_project_languages | python | def list_project_languages(self, project_id):
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', []) | Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made. | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L242-L251 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.add_language_to_project | python | def add_language_to_project(self, project_id, language_code):
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True | Adds a new language to project | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L253-L262 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.update_terms | python | def update_terms(self, project_id, data, fuzzy_trigger=None):
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms'] | Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
] | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L344-L380 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n",
"def _upload(self, project_id, updating, file_path, language_code=None,\n overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):\n \"\"\"\n Internal: updates terms / translations\n\n File uploads are limited to one every 30 seconds\n \"\"\"\n options = [\n self.UPDATING_TERMS,\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if updating not in options:\n raise POEditorArgsException(\n 'Updating arg must be in {}'.format(options)\n )\n\n options = [\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if language_code is None and updating in options:\n raise POEditorArgsException(\n 'Language code is required only if updating is '\n 'terms_translations or translations)'\n )\n\n if updating == self.UPDATING_TRANSLATIONS:\n tags = None\n sync_terms = None\n\n # Special content type:\n tags = tags or ''\n language_code = language_code or ''\n sync_terms = '1' if sync_terms else '0'\n overwrite = '1' if overwrite else '0'\n fuzzy_trigger = '1' if fuzzy_trigger else '0'\n project_id = str(project_id)\n\n with open(file_path, 'r+b') as local_file:\n data = self._run(\n url_path=\"projects/upload\",\n id=project_id,\n language=language_code,\n file=local_file,\n updating=updating,\n tags=tags,\n sync_terms=sync_terms,\n overwrite=overwrite,\n fuzzy_trigger=fuzzy_trigger\n )\n return data['result']\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
    """
    All requests to the API must contain the parameter api_token.
    You'll find it in My Account > API Access in your POEditor account.
    """
    # The token is attached to every payload built by _run / _apiv1_run.
    self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
    """POST *payload* to *url* and return the decoded JSON body.

    A file handle stored under the ``file`` key of *payload* is moved
    into a multipart upload. Raises POEditorException on a non-200 HTTP
    status, on a body lacking the "response" key, or when the API
    reports a non-success status.
    """
    extra = {}
    if payload.get('file'):
        # requests expects uploads via `files`, not `data`
        extra['files'] = {'file': payload.pop('file')}

    response = requests.post(url, data=payload, headers=headers, **extra)
    if response.status_code != 200:
        raise POEditorException(
            status='fail',
            error_code=response.status_code,
            message=response.reason
        )

    body = response.json()
    if 'response' not in body:
        raise POEditorException(
            status='fail',
            error_code=-1,
            message='"response" key is not present'
        )

    api_response = body['response']
    if 'status' in api_response and \
            api_response['status'] != self.SUCCESS_CODE:
        raise POEditorException(
            error_code=api_response.get('code'),
            status=api_response['status'],
            message=api_response.get('message')
        )
    return body
def _run(self, url_path, headers=None, **kwargs):
    """Dispatch a v2 API call to *url_path*; ``kwargs`` become the payload."""
    kwargs['api_token'] = self.api_token
    return self._make_request(self._construct_url(url_path), kwargs, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
    """Call the legacy v1 API.

    Kept only for backwards compatibility of this client; see
    ``clear_reference_language``.
    """
    warnings.warn(
        "POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
        DeprecationWarning, stacklevel=2
    )
    kwargs.update({'action': action, 'api_token': self.api_token})
    return self._make_request("https://poeditor.com/api/", kwargs, headers)
def _project_formatter(self, data):
    """Normalize a raw API project dict into a plain Python dict."""
    def _flag(value):
        # API encodes booleans as '0'/'1' strings (or empty values)
        return bool(value) and value != '0'

    project = {
        'created': parse_datetime(data['created']),
        'id': int(data['id']),
        'name': data['name'],
        'open': _flag(data['open']),
        'public': _flag(data['public']),
    }
    # the detail endpoint (projects/view) returns extra fields that the
    # list endpoint omits; copy them through only when present
    # (see https://poeditor.com/docs/api#projects_view)
    for optional in ('description', 'reference_language', 'terms'):
        if optional in data:
            project[optional] = data[optional]
    return project
def list_projects(self):
    """Return the list of projects owned by the authenticated user."""
    response = self._run(url_path="projects/list")
    return [
        self._project_formatter(project)
        for project in response['result'].get('projects', [])
    ]
def create_project(self, name, description=None):
    """Create a new project and return its id (if successful)."""
    response = self._run(
        url_path="projects/add",
        name=name,
        description=description or '',
    )
    return response['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
                   reference_language=None):
    """Update project settings (name, description, reference language).

    Fields left as None are not sent and therefore not updated.
    Returns the project id.
    """
    changes = {
        field: value
        for field, value in (
            ('name', name),
            ('description', description),
            ('reference_language', reference_language),
        )
        if value is not None
    }
    response = self._run(
        url_path="projects/update",
        id=project_id,
        **changes
    )
    return response['result']['project']['id']
def delete_project(self, project_id):
    """Delete the project from the account (owner only); returns True."""
    self._run(url_path="projects/delete", id=project_id)
    return True
def view_project_details(self, project_id):
    """Return the normalized details of a single project."""
    response = self._run(url_path="projects/view", id=project_id)
    return self._project_formatter(response['result']['project'])
def list_project_languages(self, project_id):
    """Return the project's languages, each with its completion
    percentage and the datetime (UTC - ISO 8601) of the last change."""
    response = self._run(url_path="languages/list", id=project_id)
    return response['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
    """Add a new language to the project; returns True on success."""
    self._run(
        url_path="languages/add",
        id=project_id,
        language=language_code,
    )
    return True
def delete_language_from_project(self, project_id, language_code):
    """Delete an existing language from the project; returns True."""
    self._run(
        url_path="languages/delete",
        id=project_id,
        language=language_code,
    )
    return True
def set_reference_language(self, project_id, language_code):
    """
    Sets a reference language to project.

    Thin wrapper over update_project's reference_language field;
    returns the project id.
    """
    return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
    """Clear the project's reference language via the legacy v1 API.

    The v2 API offers no working equivalent: calling projects/update
    with reference_language='' or None did not clear the setting
    (https://poeditor.com/docs/api#projects_update).
    """
    self._apiv1_run(action="clear_reference_language", id=project_id)
    return True
def view_project_terms(self, project_id, language_code=None):
    """Return the project's terms, including translations when a
    language code is provided."""
    response = self._run(
        url_path="terms/list",
        id=project_id,
        language=language_code,
    )
    return response['result'].get('terms', [])
def add_terms(self, project_id, data):
    """Add terms to the project.

    *data* is a list of term dicts, e.g.::

        [{"term": "one project found",
          "context": "",
          "reference": "/projects",
          "plural": "%d projects found",
          "comment": "Make sure you translate the plural forms",
          "tags": ["first_tag", "second_tag"]}]

    "tags" may also be a single string.
    """
    response = self._run(
        url_path="terms/add",
        id=project_id,
        data=json.dumps(data),
    )
    return response['result']['terms']
def delete_terms(self, project_id, data):
    """Delete terms from the project.

    *data* is a list of dicts identifying terms, e.g.::

        [{"term": "one project found", "context": ""},
         {"term": "Show all projects", "context": "form"}]
    """
    response = self._run(
        url_path="terms/delete",
        id=project_id,
        data=json.dumps(data),
    )
    return response['result']['terms']
def add_comment(self, project_id, data):
    """Attach comments to existing terms.

    *data* is a list of dicts with "term", "context" and "comment"
    keys, e.g.::

        [{"term": "Add new list", "context": "",
          "comment": "This is a button"}]
    """
    response = self._run(
        url_path="terms/add_comment",
        id=project_id,
        data=json.dumps(data),
    )
    return response['result']['terms']
def sync_terms(self, project_id, data):
    """Sync the project with *data*: terms not present in the list are
    deleted from the project, new ones are added.

    Use with caution — if wrong data is sent, existing terms and their
    translations may be irreversibly lost. Each entry is a term dict of
    the same shape accepted by ``add_terms``.
    """
    response = self._run(
        url_path="projects/sync",
        id=project_id,
        data=json.dumps(data),
    )
    return response['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
    """Insert / overwrite translations for a language.

    *data* example::

        [{"term": "Projects", "context": "project list",
          "translation": {"content": "Des projets", "fuzzy": 0}}]

    fuzzy_trigger, when given, is forwarded to the API unchanged.
    """
    extra = {} if fuzzy_trigger is None else {'fuzzy_trigger': fuzzy_trigger}
    response = self._run(
        url_path="languages/update",
        id=project_id,
        language=language_code,
        data=json.dumps(data),
        **extra
    )
    return response['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
           tags=None, local_file=None):
    """Export terms / translations for a language and download the file.

    file_type: one of FILE_TYPES (default 'po').
    filters: a single filter name, or a list of names, from FILTER_BY.
    tags: a tag name or list of tag names restricting the export.
    local_file: path to save the content to; if None, a temp file with
        the matching suffix is created.
    Returns (file_url, local_file). The URL expires after 10 minutes.

    >>> filters = 'translated'
    >>> filters = ["translated", "not_fuzzy"]
    >>> tags = ["name-of-tag", "name-of-another-tag"]
    """
    if file_type not in self.FILE_TYPES:
        raise POEditorArgsException(
            'content_type: file format {}'.format(self.FILE_TYPES))
    if filters:
        # BUG FIX: the original fell through to the set() membership
        # check even when `filters` was a *valid* string, iterating it
        # character-by-character and raising for e.g. 'translated'.
        # Only element-check non-string iterables.
        if isinstance(filters, str):
            if filters not in self.FILTER_BY:
                raise POEditorArgsException(
                    "filters - filter results by {}".format(self.FILTER_BY))
        elif set(filters).difference(set(self.FILTER_BY)):
            raise POEditorArgsException(
                "filters - filter results by {}".format(self.FILTER_BY))

    data = self._run(
        url_path="projects/export",
        id=project_id,
        language=language_code,
        type=file_type,
        filters=filters,
        tags=tags
    )
    # The link of the file (expires after 10 minutes).
    file_url = data['result']['url']

    # Download file content:
    res = requests.get(file_url, stream=True)
    if not local_file:
        tmp_file = tempfile.NamedTemporaryFile(
            delete=False, suffix='.{}'.format(file_type))
        tmp_file.close()
        local_file = tmp_file.name
    with open(local_file, 'w+b') as po_file:
        # renamed from `data` to avoid clobbering the API response above
        for chunk in res.iter_content(chunk_size=1024):
            po_file.write(chunk)
    return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
            overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Internal: updates terms / translations.

    Sends *file_path* to the projects/upload endpoint. ``updating``
    selects what the file updates (terms, terms_translations or
    translations); a language code is mandatory whenever translations
    are involved.

    File uploads are limited to one every 30 seconds
    """
    options = [
        self.UPDATING_TERMS,
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if updating not in options:
        raise POEditorArgsException(
            'Updating arg must be in {}'.format(options)
        )

    options = [
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if language_code is None and updating in options:
        raise POEditorArgsException(
            'Language code is required only if updating is '
            'terms_translations or translations)'
        )

    # tags / sync_terms are ignored by the API when only translations
    # are updated, so drop them before normalization below
    if updating == self.UPDATING_TRANSLATIONS:
        tags = None
        sync_terms = None

    # Special content type: the API expects string flags ('0'/'1') and
    # empty strings rather than None for omitted values
    tags = tags or ''
    language_code = language_code or ''
    sync_terms = '1' if sync_terms else '0'
    overwrite = '1' if overwrite else '0'
    fuzzy_trigger = '1' if fuzzy_trigger else '0'
    project_id = str(project_id)

    # the open handle is passed through the payload's 'file' key and
    # sent as a multipart upload by _make_request
    with open(file_path, 'r+b') as local_file:
        data = self._run(
            url_path="projects/upload",
            id=project_id,
            language=language_code,
            file=local_file,
            updating=updating,
            tags=tags,
            sync_terms=sync_terms,
            overwrite=overwrite,
            fuzzy_trigger=fuzzy_trigger
        )
    return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
                 overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """Upload a file that updates the project's terms.

    overwrite: overwrite existing translations.
    sync_terms: delete project terms missing from the uploaded file and
        add the new ones (ignored when updating = translations).
    tags: tags applied to imported terms; keys: "all", "new",
        "obsolete", "overwritten_translations".
    fuzzy_trigger: mark corresponding translations in the other
        languages as fuzzy for the updated values.
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TERMS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
def update_terms_definitions(self, project_id, file_path=None,
                             language_code=None, overwrite=False,
                             sync_terms=False, tags=None, fuzzy_trigger=None):
    """Deprecated alias of update_terms_translations."""
    warnings.warn(
        "This method has been renamed update_terms_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_terms_translations(
        project_id, file_path, language_code, overwrite,
        sync_terms, tags, fuzzy_trigger)
def update_terms_translations(self, project_id, file_path=None,
                              language_code=None, overwrite=False,
                              sync_terms=False, tags=None, fuzzy_trigger=None):
    """Upload a file that updates both terms and their translations.

    overwrite: overwrite existing translations.
    sync_terms: delete project terms missing from the uploaded file and
        add the new ones (ignored when updating = translations).
    tags: tags applied to imported terms; keys: "all", "new",
        "obsolete", "overwritten_translations".
    fuzzy_trigger: mark corresponding translations in the other
        languages as fuzzy for the updated values.
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TERMS_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
def update_definitions(self, project_id, file_path=None,
                       language_code=None, overwrite=False, fuzzy_trigger=None):
    """Deprecated alias of update_translations."""
    warnings.warn(
        "This method has been renamed update_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_translations(
        project_id, file_path, language_code, overwrite, fuzzy_trigger)
def update_translations(self, project_id, file_path=None,
                        language_code=None, overwrite=False, fuzzy_trigger=None):
    """Upload a file that updates translations only.

    overwrite: overwrite existing definitions.
    fuzzy_trigger: mark corresponding translations in the other
        languages as fuzzy for the updated values.
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
def available_languages(self):
    """Return every language supported by POEditor
    (also listed at https://poeditor.com/docs/languages)."""
    response = self._run(url_path="languages/available")
    return response['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
    """Return the list of contributors, optionally restricted to a
    project and/or language."""
    response = self._run(
        url_path="contributors/list",
        id=project_id,
        language=language_code,
    )
    return response['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
    """Add a contributor to a project language; returns True."""
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        language=language_code,
    )
    return True
def add_administrator(self, project_id, name, email):
    """Add an administrator to the project (same endpoint as
    add_contributor, with the admin flag set); returns True."""
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        admin=True,
    )
    return True
def remove_contributor(self, project_id, email, language):
    """Remove a contributor from a project language; returns True."""
    self._run(
        url_path="contributors/remove",
        id=project_id,
        email=email,
        language=language,
    )
    return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.add_comment | python | def add_comment(self, project_id, data):
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms'] | Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
] | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L403-L429 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.update_project_language | python | def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations'] | Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
] | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L474-L499 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.export | python | def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file | Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"] | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L501-L552 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI._upload | python | def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result'] | Internal: updates terms / translations
File uploads are limited to one every 30 seconds | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L554-L605 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.update_terms | python | def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
) | Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L607-L633 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n",
"def _upload(self, project_id, updating, file_path, language_code=None,\n overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):\n \"\"\"\n Internal: updates terms / translations\n\n File uploads are limited to one every 30 seconds\n \"\"\"\n options = [\n self.UPDATING_TERMS,\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if updating not in options:\n raise POEditorArgsException(\n 'Updating arg must be in {}'.format(options)\n )\n\n options = [\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if language_code is None and updating in options:\n raise POEditorArgsException(\n 'Language code is required only if updating is '\n 'terms_translations or translations)'\n )\n\n if updating == self.UPDATING_TRANSLATIONS:\n tags = None\n sync_terms = None\n\n # Special content type:\n tags = tags or ''\n language_code = language_code or ''\n sync_terms = '1' if sync_terms else '0'\n overwrite = '1' if overwrite else '0'\n fuzzy_trigger = '1' if fuzzy_trigger else '0'\n project_id = str(project_id)\n\n with open(file_path, 'r+b') as local_file:\n data = self._run(\n url_path=\"projects/upload\",\n id=project_id,\n language=language_code,\n file=local_file,\n updating=updating,\n tags=tags,\n sync_terms=sync_terms,\n overwrite=overwrite,\n fuzzy_trigger=fuzzy_trigger\n )\n return data['result']\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.update_terms_translations | python | def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
) | Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L652-L679 | [
"def _upload(self, project_id, updating, file_path, language_code=None,\n overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):\n \"\"\"\n Internal: updates terms / translations\n\n File uploads are limited to one every 30 seconds\n \"\"\"\n options = [\n self.UPDATING_TERMS,\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if updating not in options:\n raise POEditorArgsException(\n 'Updating arg must be in {}'.format(options)\n )\n\n options = [\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if language_code is None and updating in options:\n raise POEditorArgsException(\n 'Language code is required only if updating is '\n 'terms_translations or translations)'\n )\n\n if updating == self.UPDATING_TRANSLATIONS:\n tags = None\n sync_terms = None\n\n # Special content type:\n tags = tags or ''\n language_code = language_code or ''\n sync_terms = '1' if sync_terms else '0'\n overwrite = '1' if overwrite else '0'\n fuzzy_trigger = '1' if fuzzy_trigger else '0'\n project_id = str(project_id)\n\n with open(file_path, 'r+b') as local_file:\n data = self._run(\n url_path=\"projects/upload\",\n id=project_id,\n language=language_code,\n file=local_file,\n updating=updating,\n tags=tags,\n sync_terms=sync_terms,\n overwrite=overwrite,\n fuzzy_trigger=fuzzy_trigger\n )\n return data['result']\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.update_translations | python | def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
) | Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L695-L711 | [
"def _upload(self, project_id, updating, file_path, language_code=None,\n overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):\n \"\"\"\n Internal: updates terms / translations\n\n File uploads are limited to one every 30 seconds\n \"\"\"\n options = [\n self.UPDATING_TERMS,\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if updating not in options:\n raise POEditorArgsException(\n 'Updating arg must be in {}'.format(options)\n )\n\n options = [\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if language_code is None and updating in options:\n raise POEditorArgsException(\n 'Language code is required only if updating is '\n 'terms_translations or translations)'\n )\n\n if updating == self.UPDATING_TRANSLATIONS:\n tags = None\n sync_terms = None\n\n # Special content type:\n tags = tags or ''\n language_code = language_code or ''\n sync_terms = '1' if sync_terms else '0'\n overwrite = '1' if overwrite else '0'\n fuzzy_trigger = '1' if fuzzy_trigger else '0'\n project_id = str(project_id)\n\n with open(file_path, 'r+b') as local_file:\n data = self._run(\n url_path=\"projects/upload\",\n id=project_id,\n language=language_code,\n file=local_file,\n updating=updating,\n tags=tags,\n sync_terms=sync_terms,\n overwrite=overwrite,\n fuzzy_trigger=fuzzy_trigger\n )\n return data['result']\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.list_contributors | python | def list_contributors(self, project_id=None, language_code=None):
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', []) | Returns the list of contributors | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L723-L732 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def add_administrator(self, project_id, name, email):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.add_contributor | python | def add_contributor(self, project_id, name, email, language_code):
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True | Adds a contributor to a project language | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L734-L745 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
    """Return every project owned by the account as formatted dicts."""
    response = self._run(url_path="projects/list")
    raw_projects = response['result'].get('projects', [])
    return [self._project_formatter(raw) for raw in raw_projects]
def create_project(self, name, description=None):
    """Create a new project and return its numeric id."""
    response = self._run(
        url_path="projects/add",
        name=name,
        description=description or ''
    )
    return response['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
                   reference_language=None):
    """Update project settings; omitted (None) fields are left untouched.

    Returns the project id reported by the API.
    """
    candidates = {
        'name': name,
        'description': description,
        'reference_language': reference_language,
    }
    changes = {k: v for k, v in candidates.items() if v is not None}
    response = self._run(
        url_path="projects/update",
        id=project_id,
        **changes
    )
    return response['result']['project']['id']
def delete_project(self, project_id):
    """Delete the project (caller must be the owner). True on success."""
    self._run(url_path="projects/delete", id=project_id)
    return True
def view_project_details(self, project_id):
    """Fetch a single project's details, returned formatted."""
    response = self._run(url_path="projects/view", id=project_id)
    return self._project_formatter(response['result']['project'])
def list_project_languages(self, project_id):
    """List project languages, each with completion percentage and the
    UTC ISO-8601 timestamp of the last change."""
    response = self._run(url_path="languages/list", id=project_id)
    return response['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
    """Add *language_code* to the project; True on success."""
    self._run(url_path="languages/add", id=project_id, language=language_code)
    return True
def delete_language_from_project(self, project_id, language_code):
    """Remove *language_code* from the project; True on success."""
    self._run(url_path="languages/delete", id=project_id, language=language_code)
    return True
def set_reference_language(self, project_id, language_code):
    """Shortcut: set the project's reference language via update_project."""
    return self.update_project(
        project_id,
        reference_language=language_code,
    )
def clear_reference_language(self, project_id):
    """Clear the project's reference language.

    Falls back to the deprecated v1 API: the v2 projects/update endpoint
    provides no way to *unset* reference_language ('' and None are
    ignored — see https://poeditor.com/docs/api#projects_update).
    """
    self._apiv1_run(action="clear_reference_language", id=project_id)
    return True
def view_project_terms(self, project_id, language_code=None):
    """List project terms; translations are included when a language
    code is supplied."""
    response = self._run(
        url_path="terms/list",
        id=project_id,
        language=language_code
    )
    return response['result'].get('terms', [])
def add_terms(self, project_id, data):
    """Add new terms to the project.

    *data* is a list of term dicts, e.g.::

        [{"term": "one project found",
          "context": "",
          "reference": "/projects",
          "plural": "%d projects found",
          "comment": "Make sure you translate the plural forms",
          "tags": ["first_tag", "second_tag"]}]

    Returns the API's per-term result summary.
    """
    response = self._run(
        url_path="terms/add",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
    """Update existing terms via the v2 ``terms/update`` endpoint
    (change text, context, reference, plural and tags).

    NOTE(review): this definition is shadowed by a second
    ``update_terms`` (the file-upload variant) declared later in this
    class, so this version is unreachable on instances — one of the two
    should be renamed.

    >>> data = [
        {
            "term": "Add new list",
            "context": "",
            "new_term": "Save list",
            "new_context": ""
        }
    ]
    """
    kwargs = {}
    # fuzzy_trigger is optional; only forward it when explicitly given.
    if fuzzy_trigger is not None:
        kwargs['fuzzy_trigger'] = fuzzy_trigger
    data = self._run(
        url_path="terms/update",
        id=project_id,
        data=json.dumps(data),
        **kwargs
    )
    return data['result']['terms']
def delete_terms(self, project_id, data):
    """Delete terms from the project.

    *data* is a list of ``{"term": ..., "context": ...}`` dicts
    identifying the terms to remove.
    """
    response = self._run(
        url_path="terms/delete",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def add_comment(self, project_id, data):
    """Attach comments to existing terms.

    *data* is a list of ``{"term", "context", "comment"}`` dicts.
    """
    response = self._run(
        url_path="terms/add_comment",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def sync_terms(self, project_id, data):
    """Replace the project's term list with *data*.

    Terms not present in *data* are deleted from the project together
    with their translations, and new ones are added. Irreversible —
    use with caution; wrong data loses existing translations.
    """
    response = self._run(
        url_path="projects/sync",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
    """Insert or overwrite translations for *language_code*.

    *data* is a list of ``{"term", "context", "translation": {...}}``
    dicts; *fuzzy_trigger*, when given, marks other languages' matching
    translations as fuzzy.
    """
    extra = {} if fuzzy_trigger is None else {'fuzzy_trigger': fuzzy_trigger}
    response = self._run(
        url_path="languages/update",
        id=project_id,
        language=language_code,
        data=json.dumps(data),
        **extra
    )
    return response['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
           tags=None, local_file=None):
    """
    Download terms / translations as a file and return (url, local_path).

    :param file_type: one of FILE_TYPES ('po', 'pot', 'mo', ...).
    :param filters: filter results — a single FILTER_BY string or an
        iterable of them.
    :param tags: filter results by tag name(s) (str or list of str).
    :param local_file: path to save the content to; when None a random
        temp file with the matching suffix is created.
    :raises POEditorArgsException: on an unknown file_type or filter.

    The returned download URL expires after 10 minutes.
    """
    if file_type not in self.FILE_TYPES:
        raise POEditorArgsException(
            'content_type: file format {}'.format(self.FILE_TYPES))
    # BUG FIX: the old validation let a *valid* single-string filter
    # (e.g. 'translated') fall through to set(filters), which iterated
    # its characters and always raised. Branch on the type first.
    if isinstance(filters, str):
        if filters not in self.FILTER_BY:
            raise POEditorArgsException(
                "filters - filter results by {}".format(self.FILTER_BY))
    elif filters and set(filters).difference(set(self.FILTER_BY)):
        raise POEditorArgsException(
            "filters - filter results by {}".format(self.FILTER_BY))
    data = self._run(
        url_path="projects/export",
        id=project_id,
        language=language_code,
        type=file_type,
        filters=filters,
        tags=tags
    )
    # The link of the file (expires after 10 minutes).
    file_url = data['result']['url']
    # Download file content, streaming to keep memory bounded:
    res = requests.get(file_url, stream=True)
    if not local_file:
        tmp_file = tempfile.NamedTemporaryFile(
            delete=False, suffix='.{}'.format(file_type))
        tmp_file.close()
        local_file = tmp_file.name
    with open(local_file, 'w+b') as po_file:
        # Use a distinct loop variable: the old code clobbered `data`.
        for chunk in res.iter_content(chunk_size=1024):
            po_file.write(chunk)
    return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
            overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Internal: updates terms / translations by uploading *file_path*.

    :param updating: one of UPDATING_TERMS, UPDATING_TERMS_TRANSLATIONS
        or UPDATING_TRANSLATIONS.
    :raises POEditorArgsException: on a bad *updating* value, or when a
        translation upload is attempted without a language code.

    File uploads are limited to one every 30 seconds
    (see MIN_UPLOAD_INTERVAL).
    """
    options = [
        self.UPDATING_TERMS,
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if updating not in options:
        raise POEditorArgsException(
            'Updating arg must be in {}'.format(options)
        )
    # A language code is mandatory whenever translations are involved.
    options = [
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if language_code is None and updating in options:
        raise POEditorArgsException(
            'Language code is required only if updating is '
            'terms_translations or translations)'
        )
    # tags / sync_terms are meaningless for a translations-only upload.
    if updating == self.UPDATING_TRANSLATIONS:
        tags = None
        sync_terms = None
    # Special content type:
    # the API expects '' for absent values and '0'/'1' for booleans.
    tags = tags or ''
    language_code = language_code or ''
    sync_terms = '1' if sync_terms else '0'
    overwrite = '1' if overwrite else '0'
    fuzzy_trigger = '1' if fuzzy_trigger else '0'
    project_id = str(project_id)
    with open(file_path, 'r+b') as local_file:
        data = self._run(
            url_path="projects/upload",
            id=project_id,
            language=language_code,
            file=local_file,
            updating=updating,
            tags=tags,
            sync_terms=sync_terms,
            overwrite=overwrite,
            fuzzy_trigger=fuzzy_trigger
        )
    return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
                 overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Updates terms by uploading *file_path*.

    NOTE(review): this definition shadows an earlier ``update_terms``
    in this class (the ``terms/update`` data variant), making that one
    unreachable — one of the two should be renamed.

    overwrite: set it to True if you want to overwrite translations
    sync_terms: set it to True if you want to sync your terms (terms that
        are not found in the uploaded file will be deleted from project
        and the new ones added). Ignored if updating = translations
    tags: Add tags to the project terms; available when updating terms or
        terms_translations; keys: "all", "new", "obsolete",
        "overwritten_translations"
    fuzzy_trigger: set it to True to mark corresponding translations from
        the other languages as fuzzy for the updated values
    """
    return self._upload(
        project_id=project_id,
        updating=self.UPDATING_TERMS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger
    )
def update_terms_definitions(self, project_id, file_path=None,
                             language_code=None, overwrite=False,
                             sync_terms=False, tags=None, fuzzy_trigger=None):
    """Deprecated alias kept for backwards compatibility; delegates to
    update_terms_translations."""
    warnings.warn(
        "This method has been renamed update_terms_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_terms_translations(
        project_id, file_path, language_code, overwrite,
        sync_terms, tags, fuzzy_trigger
    )
def update_terms_translations(self, project_id, file_path=None,
                              language_code=None, overwrite=False,
                              sync_terms=False, tags=None, fuzzy_trigger=None):
    """Upload a file that updates both terms and their translations.

    overwrite: True to overwrite existing translations.
    sync_terms: True to delete project terms missing from the file and
        add the new ones.
    tags: tag imported terms; keys: "all", "new", "obsolete",
        "overwritten_translations".
    fuzzy_trigger: True to mark the other languages' corresponding
        translations as fuzzy for updated values.
    """
    payload = dict(
        project_id=project_id,
        updating=self.UPDATING_TERMS_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**payload)
def update_definitions(self, project_id, file_path=None,
                       language_code=None, overwrite=False, fuzzy_trigger=None):
    """Deprecated alias kept for backwards compatibility; delegates to
    update_translations."""
    warnings.warn(
        "This method has been renamed update_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_translations(
        project_id, file_path, language_code, overwrite, fuzzy_trigger
    )
def update_translations(self, project_id, file_path=None,
                        language_code=None, overwrite=False, fuzzy_trigger=None):
    """Upload a file that refreshes translations for *language_code*.

    overwrite: True to overwrite existing definitions.
    fuzzy_trigger: True to mark the other languages' corresponding
        translations as fuzzy for updated values.
    """
    payload = dict(
        project_id=project_id,
        updating=self.UPDATING_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**payload)
def available_languages(self):
    """Return all languages POEditor supports
    (https://poeditor.com/docs/languages)."""
    response = self._run(url_path="languages/available")
    return response['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
    """List contributors, optionally scoped to a project and language."""
    response = self._run(
        url_path="contributors/list",
        id=project_id,
        language=language_code
    )
    return response['result'].get('contributors', [])
def add_administrator(self, project_id, name, email):
    """Add *email* to the project as an administrator; True on success."""
    payload = {'id': project_id, 'name': name, 'email': email, 'admin': True}
    self._run(url_path="contributors/add", **payload)
    return True
def remove_contributor(self, project_id, email, language):
    """Remove the contributor *email* from a project language; True on
    success."""
    payload = {'id': project_id, 'email': email, 'language': language}
    self._run(url_path="contributors/remove", **payload)
    return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.add_administrator | python | def add_administrator(self, project_id, name, email):
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
admin=True
)
return True | Adds a contributor to a project language | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L747-L758 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
"""
All requests to the API must contain the parameter api_token.
You'll find it in My Account > API Access in your POEditor account.
"""
self.api_token = api_token
def _construct_url(self, path):
return '{}{}'.format(self.HOST, path)
def _make_request(self, url, payload, headers=None):
kwargs = {}
if payload.get('file'):
kwargs['files'] = {'file': payload.pop('file')}
response = requests.post(url, data=payload, headers=headers, **kwargs)
if response.status_code != 200:
raise POEditorException(
status='fail',
error_code=response.status_code,
message=response.reason
)
data = response.json()
if 'response' not in data:
raise POEditorException(
status='fail',
error_code=-1,
message='"response" key is not present'
)
if 'status' in data['response'] and \
data['response']['status'] != self.SUCCESS_CODE:
raise POEditorException(
error_code=data['response'].get('code'),
status=data['response']['status'],
message=data['response'].get('message')
)
return data
def _run(self, url_path, headers=None, **kwargs):
"""
Requests API
"""
url = self._construct_url(url_path)
payload = kwargs
payload.update({'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
"""
Kept for backwards compatibility of this client
See "self.clear_reference_language"
"""
warnings.warn(
"POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
DeprecationWarning, stacklevel=2
)
url = "https://poeditor.com/api/"
payload = kwargs
payload.update({'action': action, 'api_token': self.api_token})
return self._make_request(url, payload, headers)
def _project_formatter(self, data):
"""
Project object
"""
open_ = False if not data['open'] or data['open'] == '0' else True
public = False if not data['public'] or data['public'] == '0' else True
output = {
'created': parse_datetime(data['created']),
'id': int(data['id']),
'name': data['name'],
'open': open_,
'public': public,
}
# the detail view returns more info than the list view
# see https://poeditor.com/docs/api#projects_view
for key in ['description', 'reference_language', 'terms']:
if key in data:
output[key] = data[key]
return output
def list_projects(self):
"""
Returns the list of projects owned by user.
"""
data = self._run(
url_path="projects/list"
)
projects = data['result'].get('projects', [])
return [self._project_formatter(item) for item in projects]
def create_project(self, name, description=None):
"""
creates a new project. Returns the id of the project (if successful)
"""
description = description or ''
data = self._run(
url_path="projects/add",
name=name,
description=description
)
return data['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
reference_language=None):
"""
Updates project settings (name, description, reference language)
If optional parameters are not sent, their respective fields are not updated.
"""
kwargs = {}
if name is not None:
kwargs['name'] = name
if description is not None:
kwargs['description'] = description
if reference_language is not None:
kwargs['reference_language'] = reference_language
data = self._run(
url_path="projects/update",
id=project_id,
**kwargs
)
return data['result']['project']['id']
def delete_project(self, project_id):
"""
Deletes the project from the account.
You must be the owner of the project.
"""
self._run(
url_path="projects/delete",
id=project_id,
)
return True
def view_project_details(self, project_id):
"""
Returns project's details.
"""
data = self._run(
url_path="projects/view",
id=project_id
)
return self._project_formatter(data['result']['project'])
def list_project_languages(self, project_id):
"""
Returns project languages, percentage of translation done for each and the
datetime (UTC - ISO 8601) when the last change was made.
"""
data = self._run(
url_path="languages/list",
id=project_id
)
return data['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
"""
Adds a new language to project
"""
self._run(
url_path="languages/add",
id=project_id,
language=language_code
)
return True
def delete_language_from_project(self, project_id, language_code):
"""
Deletes existing language from project
"""
self._run(
url_path="languages/delete",
id=project_id,
language=language_code
)
return True
def set_reference_language(self, project_id, language_code):
"""
Sets a reference language to project
"""
return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
"""
Clears reference language from project
Could not find how to reproduce the "clear_reference_language" v1 action with the v2 API.
Calling v2 projects/update with reference_language='' or reference_language=None did not work.
https://poeditor.com/docs/api#projects_update
"""
self._apiv1_run(
action="clear_reference_language",
id=project_id
)
return True
def view_project_terms(self, project_id, language_code=None):
"""
Returns project's terms and translations if the argument language is provided.
"""
data = self._run(
url_path="terms/list",
id=project_id,
language=language_code
)
return data['result'].get('terms', [])
def add_terms(self, project_id, data):
"""
Adds terms to project.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="terms/add",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
"""
Updates project terms. Lets you change the text, context, reference, plural and tags.
>>> data = [
{
"term": "Add new list",
"context": "",
"new_term": "Save list",
"new_context": "",
"reference": "\/projects",
"plural": "",
"comment": "",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Display list",
"context": "",
"new_term": "Show list",
"new_context": ""
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="terms/update",
id=project_id,
data=json.dumps(data),
**kwargs
)
return data['result']['terms']
def delete_terms(self, project_id, data):
"""
Deletes terms from project.
>>> data = [
{
"term": "one project found",
"context": ""
},
{
"term": "Show all projects",
"context": "form"
}
]
"""
data = self._run(
url_path="terms/delete",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def add_comment(self, project_id, data):
"""
Adds comments to existing terms.
>>> data = [
{
"term": "Add new list",
"context": "",
"comment": "This is a button"
},
{
"term": "one project found",
"context": "",
"comment": "Make sure you translate the plural forms"
},
{
"term": "Show all projects",
"context": "",
"comment": "This is a button"
}
]
"""
data = self._run(
url_path="terms/add_comment",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def sync_terms(self, project_id, data):
"""
Syncs your project with the array you send (terms that are not found
in the dict object will be deleted from project and the new ones
added).
Please use with caution. If wrong data is sent, existing terms and their
translations might be irreversibly lost.
>>> data = [
{
"term": "Add new list",
"context": "",
"reference": "\/projects",
"plural": "",
"comment": ""
},
{
"term": "one project found",
"context": "",
"reference": "\/projects",
"plural": "%d projects found",
"comment": "Make sure you translate the plural forms",
"tags": [
"first_tag",
"second_tag"
]
},
{
"term": "Show all projects",
"context": "",
"reference": "\/projects",
"plural": "",
"tags": "just_a_tag"
}
]
"""
data = self._run(
url_path="projects/sync",
id=project_id,
data=json.dumps(data)
)
return data['result']['terms']
def update_project_language(self, project_id, language_code, data, fuzzy_trigger=None):
"""
Inserts / overwrites translations.
>>> data = [
{
"term": "Projects",
"context": "project list",
"translation": {
"content": "Des projets",
"fuzzy": 0
}
}
]
"""
kwargs = {}
if fuzzy_trigger is not None:
kwargs['fuzzy_trigger'] = fuzzy_trigger
data = self._run(
url_path="languages/update",
id=project_id,
language=language_code,
data=json.dumps(data),
**kwargs
)
return data['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
tags=None, local_file=None):
"""
Return terms / translations
filters - filter by self._filter_by
tags - filter results by tags;
local_file - save content into it. If None, save content into
random temp file.
>>> tags = 'name-of-tag'
>>> tags = ["name-of-tag"]
>>> tags = ["name-of-tag", "name-of-another-tag"]
>>> filters = 'translated'
>>> filters = ["translated"]
>>> filters = ["translated", "not_fuzzy"]
"""
if file_type not in self.FILE_TYPES:
raise POEditorArgsException(
'content_type: file format {}'.format(self.FILE_TYPES))
if filters and isinstance(filters, str) and filters not in self.FILTER_BY:
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
elif filters and set(filters).difference(set(self.FILTER_BY)):
raise POEditorArgsException(
"filters - filter results by {}".format(self.FILTER_BY))
data = self._run(
url_path="projects/export",
id=project_id,
language=language_code,
type=file_type,
filters=filters,
tags=tags
)
# The link of the file (expires after 10 minutes).
file_url = data['result']['url']
# Download file content:
res = requests.get(file_url, stream=True)
if not local_file:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, suffix='.{}'.format(file_type))
tmp_file.close()
local_file = tmp_file.name
with open(local_file, 'w+b') as po_file:
for data in res.iter_content(chunk_size=1024):
po_file.write(data)
return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_terms_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_terms_translations",
DeprecationWarning, stacklevel=2
)
return self.update_terms_translations(
project_id,
file_path,
language_code,
overwrite,
sync_terms,
tags,
fuzzy_trigger
)
def update_terms_translations(self, project_id, file_path=None,
language_code=None, overwrite=False,
sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Updates terms translations
overwrite: set it to True if you want to overwrite translations
sync_terms: set it to True if you want to sync your terms (terms that
are not found in the uploaded file will be deleted from project
and the new ones added). Ignored if updating = translations
tags: Add tags to the project terms; available when updating terms or terms_translations;
you can use the following keys: "all" - for the all the imported terms, "new" - for
the terms which aren't already in the project, "obsolete" - for the terms which are
in the project but not in the imported file and "overwritten_translations" - for the
terms for which translations change
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TERMS_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
sync_terms=sync_terms,
tags=tags,
fuzzy_trigger=fuzzy_trigger
)
def update_definitions(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
warnings.warn(
"This method has been renamed update_translations",
DeprecationWarning, stacklevel=2
)
return self.update_translations(
project_id,
file_path,
language_code,
overwrite,
fuzzy_trigger
)
def update_translations(self, project_id, file_path=None,
language_code=None, overwrite=False, fuzzy_trigger=None):
"""
Updates translations
overwrite: set it to True if you want to overwrite definitions
fuzzy_trigger: set it to True to mark corresponding translations from the
other languages as fuzzy for the updated values
"""
return self._upload(
project_id=project_id,
updating=self.UPDATING_TRANSLATIONS,
file_path=file_path,
language_code=language_code,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
def available_languages(self):
"""
Returns a comprehensive list of all languages supported by POEditor.
You can find it here (https://poeditor.com/docs/languages), too.
"""
data = self._run(
url_path="languages/available"
)
return data['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
"""
Returns the list of contributors
"""
data = self._run(
url_path="contributors/list",
id=project_id,
language=language_code
)
return data['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
"""
Adds a contributor to a project language
"""
self._run(
url_path="contributors/add",
id=project_id,
name=name,
email=email,
language=language_code
)
return True
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True
|
sporteasy/python-poeditor | poeditor/client.py | POEditorAPI.remove_contributor | python | def remove_contributor(self, project_id, email, language):
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True | Removes a contributor | train | https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L760-L770 | [
"def _run(self, url_path, headers=None, **kwargs):\n \"\"\"\n Requests API\n \"\"\"\n url = self._construct_url(url_path)\n\n payload = kwargs\n payload.update({'api_token': self.api_token})\n\n return self._make_request(url, payload, headers)\n"
] | class POEditorAPI(object):
"""
Connect your software to POEditor with its simple API
Please refers to https://poeditor.com/docs/api if you have questions
"""
HOST = "https://api.poeditor.com/v2/"
SUCCESS_CODE = "success"
FILE_TYPES = ['po', 'pot', 'mo', 'xls', 'csv', 'resx', 'resw', 'android_strings',
'apple_strings', 'xliff', 'properties', 'key_value_json', 'json',
'xmb', 'xtb']
FILTER_BY = ['translated', 'untranslated', 'fuzzy', 'not_fuzzy',
'automatic', 'not_automatic', 'proofread', 'not_proofread']
UPDATING_TERMS = 'terms'
UPDATING_TERMS_TRANSLATIONS = 'terms_translations'
UPDATING_TRANSLATIONS = 'translations'
# in seconds. Upload: No more than one request every 30 seconds
MIN_UPLOAD_INTERVAL = 30
def __init__(self, api_token):
    """
    Store the account API token used by every request.

    All requests to the API must contain the parameter api_token.
    You'll find it in My Account > API Access in your POEditor account.
    """
    self.api_token = api_token
def _construct_url(self, path):
    """Join *path* onto the API host base URL."""
    return self.HOST + path
def _make_request(self, url, payload, headers=None):
    """
    POST *payload* to *url* and return the decoded JSON body.

    A value under the 'file' key is moved out of the form data and sent
    as a multipart upload instead.

    :raises POEditorException: on a non-200 HTTP status, on a body that
        lacks a "response" key, or on a response whose status is not
        SUCCESS_CODE.
    """
    kwargs = {}
    if payload.get('file'):
        # requests expects file handles in the separate "files" argument.
        kwargs['files'] = {'file': payload.pop('file')}
    response = requests.post(url, data=payload, headers=headers, **kwargs)
    if response.status_code != 200:
        raise POEditorException(
            status='fail',
            error_code=response.status_code,
            message=response.reason
        )
    data = response.json()
    if 'response' not in data:
        raise POEditorException(
            status='fail',
            error_code=-1,
            message='"response" key is not present'
        )
    # The API signals application-level failures with HTTP 200 plus a
    # non-success status in the body, so this check is still required.
    if 'status' in data['response'] and \
            data['response']['status'] != self.SUCCESS_CODE:
        raise POEditorException(
            error_code=data['response'].get('code'),
            status=data['response']['status'],
            message=data['response'].get('message')
        )
    return data
def _run(self, url_path, headers=None, **kwargs):
    """Dispatch an API v2 call to *url_path* with the token attached."""
    kwargs['api_token'] = self.api_token
    return self._make_request(self._construct_url(url_path), kwargs, headers)
def _apiv1_run(self, action, headers=None, **kwargs):
    """
    Call the legacy POEditor API v1.

    Kept for backwards compatibility of this client; see
    "self.clear_reference_language", which has no v2 equivalent.
    """
    warnings.warn(
        "POEditor API v1 is deprecated. Use POEditorAPI._run method to call API v2",
        DeprecationWarning, stacklevel=2
    )
    url = "https://poeditor.com/api/"
    # v1 selects the operation via an "action" form field, not a URL path.
    payload = kwargs
    payload.update({'action': action, 'api_token': self.api_token})
    return self._make_request(url, payload, headers)
def _project_formatter(self, data):
    """Normalize a raw API project payload into a plain dict."""
    def _flag(value):
        # The API reports booleans as '0'/'1' strings (or empty values).
        return bool(value) and value != '0'

    output = {
        'created': parse_datetime(data['created']),
        'id': int(data['id']),
        'name': data['name'],
        'open': _flag(data['open']),
        'public': _flag(data['public']),
    }
    # The detail endpoint includes extra fields the list endpoint omits.
    # See https://poeditor.com/docs/api#projects_view
    for key in ('description', 'reference_language', 'terms'):
        if key in data:
            output[key] = data[key]
    return output
def list_projects(self):
    """Return all projects owned by the account, normalized to dicts."""
    response = self._run(url_path="projects/list")
    raw_projects = response['result'].get('projects', [])
    return [self._project_formatter(project) for project in raw_projects]
def create_project(self, name, description=None):
    """Create a new project and return its id."""
    response = self._run(
        url_path="projects/add",
        name=name,
        description=description or ''
    )
    return response['result']['project']['id']
def update_project(self, project_id, name=None, description=None,
                   reference_language=None):
    """
    Update project settings (name, description, reference language).

    Fields left as None are not sent and therefore not modified.
    :returns: the project id reported by the API
    """
    changes = {
        key: value
        for key, value in (('name', name),
                           ('description', description),
                           ('reference_language', reference_language))
        if value is not None
    }
    response = self._run(url_path="projects/update", id=project_id, **changes)
    return response['result']['project']['id']
def delete_project(self, project_id):
    """Delete the project from the account (owner only)."""
    self._run(url_path="projects/delete", id=project_id)
    return True
def view_project_details(self, project_id):
    """Return the normalized detail record for one project."""
    response = self._run(url_path="projects/view", id=project_id)
    return self._project_formatter(response['result']['project'])
def list_project_languages(self, project_id):
    """Return project languages with completion % and last-change time (UTC ISO 8601)."""
    response = self._run(url_path="languages/list", id=project_id)
    return response['result'].get('languages', [])
def add_language_to_project(self, project_id, language_code):
    """Enable a new language on the project."""
    self._run(url_path="languages/add", id=project_id,
              language=language_code)
    return True
def delete_language_from_project(self, project_id, language_code):
    """Remove an existing language (and its translations) from the project."""
    self._run(url_path="languages/delete", id=project_id,
              language=language_code)
    return True
def set_reference_language(self, project_id, language_code):
    """Make *language_code* the project's reference language."""
    return self.update_project(project_id, reference_language=language_code)
def clear_reference_language(self, project_id):
    """
    Clears reference language from project.

    Implemented against the deprecated API v1: there is no known way to
    reproduce this with v2 (projects/update with reference_language=''
    or None did not work). https://poeditor.com/docs/api#projects_update
    """
    self._apiv1_run(
        action="clear_reference_language",
        id=project_id
    )
    return True
def view_project_terms(self, project_id, language_code=None):
    """List project terms; include translations when *language_code* is given."""
    response = self._run(
        url_path="terms/list",
        id=project_id,
        language=language_code
    )
    return response['result'].get('terms', [])
def add_terms(self, project_id, data):
    """
    Add new terms to a project.

    :param data: list of term dicts; each may carry "term", "context",
        "reference", "plural", "comment" and "tags" keys, e.g.
        [{"term": "Add new list", "context": "", "reference": "/projects",
          "plural": "", "comment": ""}]
    :returns: the "terms" result section reported by the API
    """
    response = self._run(
        url_path="terms/add",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def update_terms(self, project_id, data, fuzzy_trigger=None):
    """
    Updates project terms (terms/update endpoint). Lets you change the
    text, context, reference, plural and tags of existing terms.

    NOTE(review): a second method named ``update_terms`` (the file-upload
    variant) is defined later in this class; Python keeps only the last
    definition of a name, so this implementation is shadowed and
    unreachable — confirm which variant callers expect.

    :param data: list of dicts, each identifying a term by "term"/"context"
        and carrying the new values, e.g.
        [{"term": "Display list", "context": "",
          "new_term": "Show list", "new_context": ""}]
    :param fuzzy_trigger: forwarded to the API when not None
    :returns: the "terms" result section reported by the API
    """
    kwargs = {}
    if fuzzy_trigger is not None:
        kwargs['fuzzy_trigger'] = fuzzy_trigger

    data = self._run(
        url_path="terms/update",
        id=project_id,
        data=json.dumps(data),
        **kwargs
    )
    return data['result']['terms']
def delete_terms(self, project_id, data):
    """
    Delete the given terms (and their translations) from the project.

    :param data: list of dicts with "term" and "context" keys
    :returns: the "terms" summary reported by the API
    """
    response = self._run(
        url_path="terms/delete",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def add_comment(self, project_id, data):
    """
    Attach comments to existing terms.

    :param data: list of dicts with "term", "context" and "comment" keys,
        e.g. [{"term": "Add new list", "context": "",
               "comment": "This is a button"}]
    :returns: the "terms" summary reported by the API
    """
    response = self._run(
        url_path="terms/add_comment",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def sync_terms(self, project_id, data):
    """
    Sync the project's terms with *data*: terms absent from *data* are
    deleted from the project (with their translations) and new ones are
    added.

    Use with caution — sending wrong data irreversibly destroys existing
    terms and translations.

    :param data: list of term dicts (same shape as :meth:`add_terms`)
    :returns: the "terms" summary reported by the API
    """
    response = self._run(
        url_path="projects/sync",
        id=project_id,
        data=json.dumps(data)
    )
    return response['result']['terms']
def update_project_language(self, project_id, language_code, data,
                            fuzzy_trigger=None):
    """
    Insert / overwrite translations for one language.

    :param data: list of dicts with "term", "context" and a "translation"
        dict carrying "content" and "fuzzy" keys
    :param fuzzy_trigger: forwarded to the API when not None
    :returns: the "translations" summary reported by the API
    """
    extra = {} if fuzzy_trigger is None else {'fuzzy_trigger': fuzzy_trigger}
    response = self._run(
        url_path="languages/update",
        id=project_id,
        language=language_code,
        data=json.dumps(data),
        **extra
    )
    return response['result']['translations']
def export(self, project_id, language_code, file_type='po', filters=None,
           tags=None, local_file=None):
    """
    Download terms / translations in the requested file format.

    :param file_type: one of FILE_TYPES ('po' by default)
    :param filters: a FILTER_BY name or a list of them
    :param tags: a tag name or a list of tag names to filter by
    :param local_file: path to write the export to; when None a temp
        file with a matching suffix is created
    :returns: (download_url, local_file_path); the URL expires after
        10 minutes
    :raises POEditorArgsException: for an unknown file type or filter
    """
    if file_type not in self.FILE_TYPES:
        raise POEditorArgsException(
            'content_type: file format {}'.format(self.FILE_TYPES))

    # Bug fix: the old validation applied set() directly to a bare string
    # filter, iterating its characters and wrongly rejecting valid single
    # values such as 'translated'. Normalize to a list before checking.
    if filters:
        requested = [filters] if isinstance(filters, str) else list(filters)
        if set(requested).difference(set(self.FILTER_BY)):
            raise POEditorArgsException(
                "filters - filter results by {}".format(self.FILTER_BY))

    data = self._run(
        url_path="projects/export",
        id=project_id,
        language=language_code,
        type=file_type,
        filters=filters,
        tags=tags
    )
    # The link of the file (expires after 10 minutes).
    file_url = data['result']['url']

    # Download file content:
    res = requests.get(file_url, stream=True)
    if not local_file:
        tmp_file = tempfile.NamedTemporaryFile(
            delete=False, suffix='.{}'.format(file_type))
        tmp_file.close()
        local_file = tmp_file.name

    with open(local_file, 'w+b') as po_file:
        # Renamed the loop variable: the original reused `data`, shadowing
        # the API response dict.
        for chunk in res.iter_content(chunk_size=1024):
            po_file.write(chunk)
    return file_url, local_file
def _upload(self, project_id, updating, file_path, language_code=None,
            overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Internal: upload a file updating terms and/or translations via the
    projects/upload endpoint.

    :param updating: one of UPDATING_TERMS, UPDATING_TERMS_TRANSLATIONS,
        UPDATING_TRANSLATIONS
    :raises POEditorArgsException: for an invalid *updating* value, or a
        missing *language_code* when translations are involved
    :returns: the "result" section of the API response

    File uploads are limited to one every 30 seconds (MIN_UPLOAD_INTERVAL).
    """
    options = [
        self.UPDATING_TERMS,
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if updating not in options:
        raise POEditorArgsException(
            'Updating arg must be in {}'.format(options)
        )
    # A language is mandatory whenever translations are being updated.
    options = [
        self.UPDATING_TERMS_TRANSLATIONS,
        self.UPDATING_TRANSLATIONS
    ]
    if language_code is None and updating in options:
        raise POEditorArgsException(
            'Language code is required only if updating is '
            'terms_translations or translations)'
        )
    # tags / sync_terms only make sense when terms are being touched.
    if updating == self.UPDATING_TRANSLATIONS:
        tags = None
        sync_terms = None
    # Special content type: the API expects string flags ('0'/'1'),
    # so normalize all Python values here.
    tags = tags or ''
    language_code = language_code or ''
    sync_terms = '1' if sync_terms else '0'
    overwrite = '1' if overwrite else '0'
    fuzzy_trigger = '1' if fuzzy_trigger else '0'
    project_id = str(project_id)

    with open(file_path, 'r+b') as local_file:
        data = self._run(
            url_path="projects/upload",
            id=project_id,
            language=language_code,
            file=local_file,
            updating=updating,
            tags=tags,
            sync_terms=sync_terms,
            overwrite=overwrite,
            fuzzy_trigger=fuzzy_trigger
        )
    return data['result']
def update_terms(self, project_id, file_path=None, language_code=None,
                 overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Upload a file updating the project's terms.

    NOTE(review): this definition has the same name as the earlier
    ``update_terms`` (the terms/update JSON variant) and therefore
    replaces it on the class — confirm which one callers expect.

    :param overwrite: set it to True if you want to overwrite translations
    :param sync_terms: set it to True if you want to sync your terms
        (terms not found in the uploaded file will be deleted from the
        project and the new ones added). Ignored if updating = translations
    :param tags: tags to add to the imported terms; keys: "all", "new",
        "obsolete", "overwritten_translations"
    :param fuzzy_trigger: set it to True to mark corresponding translations
        from the other languages as fuzzy for the updated values
    """
    return self._upload(
        project_id=project_id,
        updating=self.UPDATING_TERMS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger
    )
def update_terms_definitions(self, project_id, file_path=None,
                             language_code=None, overwrite=False,
                             sync_terms=False, tags=None, fuzzy_trigger=None):
    """Deprecated alias for :meth:`update_terms_translations`."""
    warnings.warn(
        "This method has been renamed update_terms_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_terms_translations(
        project_id, file_path, language_code, overwrite, sync_terms,
        tags, fuzzy_trigger
    )
def update_terms_translations(self, project_id, file_path=None,
                              language_code=None, overwrite=False,
                              sync_terms=False, tags=None, fuzzy_trigger=None):
    """
    Upload a file updating both terms and their translations.

    :param overwrite: True to overwrite existing translations
    :param sync_terms: True to delete terms missing from the file and add
        the new ones
    :param tags: tags for imported terms; keys: "all", "new", "obsolete",
        "overwritten_translations"
    :param fuzzy_trigger: True to mark other languages' translations as
        fuzzy for updated values
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TERMS_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
def update_definitions(self, project_id, file_path=None,
                       language_code=None, overwrite=False, fuzzy_trigger=None):
    """Deprecated alias for :meth:`update_translations`."""
    warnings.warn(
        "This method has been renamed update_translations",
        DeprecationWarning, stacklevel=2
    )
    return self.update_translations(
        project_id, file_path, language_code, overwrite, fuzzy_trigger
    )
def update_translations(self, project_id, file_path=None,
                        language_code=None, overwrite=False, fuzzy_trigger=None):
    """
    Upload a file updating translations for one language.

    :param overwrite: True to overwrite existing definitions
    :param fuzzy_trigger: True to mark other languages' translations as
        fuzzy for updated values
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
def available_languages(self):
    """Return every language POEditor supports (see poeditor.com/docs/languages)."""
    response = self._run(url_path="languages/available")
    return response['result'].get('languages', [])
def list_contributors(self, project_id=None, language_code=None):
    """List contributors, optionally narrowed to a project and/or language."""
    response = self._run(
        url_path="contributors/list",
        id=project_id,
        language=language_code
    )
    return response['result'].get('contributors', [])
def add_contributor(self, project_id, name, email, language_code):
    """Add a contributor (by name and email) to one project language."""
    self._run(url_path="contributors/add", id=project_id, name=name,
              email=email, language=language_code)
    return True
def add_administrator(self, project_id, name, email):
    """
    Adds an administrator to a project (all languages).

    Same endpoint as add_contributor, with the admin flag set.
    """
    self._run(
        url_path="contributors/add",
        id=project_id,
        name=name,
        email=email,
        admin=True
    )
    return True
|
codelv/enaml-native-cli | enamlnativecli/main.py | find_conda | python | def find_conda():
USER_HOME = os.path.expanduser('~')
CONDA_HOME = os.environ.get('CONDA_HOME', '')
PROGRAMDATA = os.environ.get('PROGRAMDATA', '')
# Search common install paths and sys path
search_paths = [
# Windows
join(PROGRAMDATA, 'miniconda2', 'scripts'),
join(PROGRAMDATA, 'miniconda3', 'scripts'),
join(USER_HOME, 'miniconda2', 'scripts'),
join(USER_HOME, 'miniconda3', 'scripts'),
join(CONDA_HOME, 'scripts'),
# Linux
join(USER_HOME, 'miniconda2', 'bin'),
join(USER_HOME, 'miniconda3', 'bin'),
join(CONDA_HOME, 'bin'),
# TODO: OSX
] + os.environ.get("PATH", "").split(";" if 'win' in sys.path else ":")
cmd = 'conda.exe' if IS_WIN else 'conda'
for conda_path in search_paths:
conda = join(conda_path, cmd)
if exists(conda):
return sh.Command(conda)
# Try to let the system find it
return sh.conda | Try to find conda on the system | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L74-L103 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the GPLv3 License.
The full license is in the file COPYING.txt, distributed with this software.
Created on July 10, 2017
@author: jrm
"""
import os
import re
import sys
import json
import shutil
import tarfile
import fnmatch
import compileall
import pkg_resources
from glob import glob
from os.path import join, exists, abspath, expanduser, realpath, dirname
from argparse import ArgumentParser, Namespace, REMAINDER
from atom.api import (Atom, Bool, Callable, Dict, List, Unicode, Float, Int,
Instance, set_default)
from contextlib import contextmanager
from cookiecutter.main import cookiecutter
from cookiecutter.log import configure_logger
from distutils.dir_util import copy_tree
try:
# Try conda's version
import ruamel_yaml as yaml
except:
from ruamel import yaml
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
IS_WIN = 'win' in sys.platform and not 'darwin' == sys.platform
# sh does not work on windows
if IS_WIN:
import pbs
class Sh(object):
def __getattr__(self, attr):
if hasattr(pbs, attr):
return getattr(pbs, attr)
return pbs.Command(attr)
sh = Sh()
ANDROID_SDK = join(os.environ.get('LOCALAPPDATA', ''), 'Android', 'Sdk')
adb = join(ANDROID_SDK, 'platform-tools', 'adb.exe')
emulator = join(ANDROID_SDK, 'emulator', 'emulator.exe')
if exists(adb):
sh.adb = sh.Command(adb)
else:
raise EnvironmentError("Couldn't find a adb in your System, "
"Make sure android studio is installed")
if exists(emulator):
sh.emulator = sh.Command(emulator)
else:
raise EnvironmentError("Couldn't find a emulator in your System, "
"Make sure android studio is installed")
else:
import sh
class Colors:
    """ANSI terminal escape sequences used to colorize CLI output."""
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    GREEN = "\033[0;32m"
    RESET = "\033[0;0m"  # restore default terminal attributes
    BOLD = "\033[;1m"
    REVERSE = "\033[;7m"
@contextmanager
def cd(newdir):
    """Context manager: chdir into *newdir* (``~`` expanded), always chdir back."""
    origin = os.getcwd()
    print("[DEBUG]: -> running cd {}".format(newdir))
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        print("[DEBUG]: -> running cd {}".format(origin))
        os.chdir(origin)
def cp(src, dst):
    """Like ``cp -R src dst``: files via shutil.copy (creating missing parent
    dirs), directories via distutils' copy_tree."""
    print("[DEBUG]: -> copying {} to {}".format(src, dst))
    if os.path.isfile(src):
        parent = dirname(dst)
        if not exists(parent):
            os.makedirs(parent)
        shutil.copy(src, dst)
    else:
        copy_tree(src, dst)
def shprint(cmd, *args, **kwargs):
    """
    Run an sh/pbs command, echoing its combined stdout+stderr live.

    :param cmd: an sh.Command (or pbs.Command on Windows)
    :param _debug: kwarg (default True); when False, non-Windows output is
        rewritten into single-line "[DEBUG]" status lines instead of raw
        streaming
    """
    debug = kwargs.pop('_debug', True)
    write, flush = sys.stdout.write, sys.stdout.flush
    # Merge stderr into stdout and stream unbuffered, character by character.
    kwargs.update({
        '_err_to_out': True,
        '_out_bufsize': 0,
        '_iter': True
    })
    print("{}[INFO]: -> running {} {}{}".format(
        Colors.CYAN, cmd, " ".join([a for a in args if
                                    not isinstance(a, sh.RunningCommand)
                                    ]), Colors.RESET))
    if IS_WIN:
        # pbs doesn't support sh's iter/bufsize kwargs; poll the raw pipe.
        kwargs.pop('_out_bufsize')
        kwargs.pop('_iter')
        kwargs['_bg'] = True
        process = cmd(*args, **kwargs).process
        # NOTE(review): read(1) returns bytes on Python 3, so the ''
        # sentinel never matches; the loop relies on the `if not c: break`
        # guard below — confirm intended on py3.
        for c in iter(lambda: process.stdout.read(1), ''):
            write(c.decode('utf-8'))
            if c in ['\r', '\n']:
                flush()
            if not c:
                break
        process.wait()
        return
    buf = []
    for c in cmd(*args, **kwargs):
        if debug:
            # Raw passthrough, flushing at line boundaries.
            write(c)
            if c in ['\r', '\n']:
                flush()
        else:
            if c in ['\r', '\n']:
                # Collapse each finished line into an overwritten status
                # line, highlighted red when it mentions an error.
                msg = ''.join(buf)
                color = Colors.RED if 'error' in msg else Colors.RESET
                write('{}\r[DEBUG]: {:<{w}}{}'.format(
                    color, msg, Colors.RESET, w=100))
                flush()
                buf = []
            else:
                buf.append(c)
    write("\n")
    flush()
ANDROID_ABIS = {
'x86_64': 'x86_64',
'x86': 'x86',
'armeabi-v7a': 'arm',
'arm64-v8a': 'arm64',
}
ANDROID_TARGETS = {v: k for k, v in ANDROID_ABIS.items()}
class Command(Atom):
    """Base class for enaml-native CLI subcommands.

    Subclasses set `title`, `help` and `args`, and override `run`.
    """
    # Shared class-level instance slot; presumably assigned elsewhere
    # when the command is constructed — confirm against the CLI setup.
    _instance = None

    #: Subcommand name ex enaml-native <name>
    title = Unicode()

    #: Subcommand short description
    desc = Unicode()

    #: Subcommand help text
    help = Unicode()

    #: Package context used to retrieve app config and env
    ctx = Dict()

    #: Reference to other CLI commands
    cmds = Dict()

    #: Arguments this command accepts
    args = List(tuple)

    #: Parser this command uses. Generated automatically.
    parser = Instance(ArgumentParser)

    #: If the command requires running in an app dir
    app_dir_required = Bool(True)

    #: Reference to the cli
    cli = Instance(Atom)

    @classmethod
    def instance(cls):
        """Return the shared instance stored on the class (may be None)."""
        return cls._instance

    def run(self, args):
        """Execute the command with the parsed argparse Namespace. Override me."""
        pass
class Create(Command):
    """`enaml-native create <what>` — scaffold an app/lib/package from the
    cookiecutter templates bundled with this package."""
    title = set_default('create')
    help = set_default("Create an enaml-native project")
    args = set_default([
        ('what', dict(help='What to create (app, lib, package)?')),
        ('--no-input', dict(action='store_true',
                            help="Use all defaults")),
        # Bug fix: the two adjacent literals previously concatenated to
        # "Overwrite the contents ifit already exists" (missing space).
        ('-f --overwrite-if-exists', dict(action='store_true',
                                          help="Overwrite the contents if "
                                               "it already exists")),
        ('-v --verbose', dict(action='store_true', help="Verbose logging")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        # Templates ship with the package under templates/<what>.
        template = join(dirname(__file__), 'templates', args.what)
        configure_logger(
            stream_level='DEBUG' if args.verbose else 'INFO',
            debug_file=None,
        )
        cookiecutter(template,
                     no_input=args.no_input,
                     overwrite_if_exists=args.overwrite_if_exists)
        print(Colors.GREEN+"[INFO] {} created successfully!".format(
            args.what.title())+Colors.RESET)
class BuildRecipe(Command):
    """`enaml-native build-recipe <package>` — thin wrapper over `conda build`."""
    title = set_default('build-recipe')
    help = set_default("Alias to conda build")
    args = set_default([
        ('package', dict(help='Conda recipe to build')),
        ('args', dict(nargs=REMAINDER, help="args to pass to conda build")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        env = os.environ.copy()
        if args.package.startswith('pip-'):
            # pip-* recipes are cross-compiled; pointing CC/CXX at
            # /bin/false makes any accidental host compile fail fast.
            env.update({'CC': '/bin/false', 'CXX':'/bin/false'})
        shprint(self.cli.conda, 'build', args.package, *args.args, _env=env)
        print(Colors.GREEN+"[INFO] Built {} successfully!".format(
            args.package)+Colors.RESET)
class MakePipRecipe(Command):
title = set_default('make-pip-recipe')
help = set_default("Creates a universal Android and iOS recipe "
"for a given pip package")
args = set_default([
('package', dict(help='pip package to build a recipe for')),
('--recursive', dict(action='store_true',
help="recursively create for all dependencies")),
('--force', dict(action='store_true',
help="force recreation if it already exists")),
('--croot', dict(nargs="?", help="conda root for building recipes")),
])
#: Can be run from anywhere
app_dir_required = set_default(False)
#: Recipes built
_built = List()
def run(self, args):
self.build(args.package, args)
print(Colors.GREEN+"[INFO] Made successfully!"+Colors.RESET)
def build(self, package, args):
ctx = self.ctx
old = set(os.listdir('.'))
# Run conda skeleton
shprint(self.cli.conda, 'skeleton', 'pypi', package)
new = set(os.listdir('.')).difference(old)
self._built.append(package)
for recipe in new:
dst = 'pip-{}'.format(recipe)
# Rename to add pip-prefix so it doesn't
# conflict with regular recipes
if args.force and exists(dst):
shutil.rmtree(dst)
shutil.move(recipe, dst)
#template = join(dirname(__file__), 'templates', 'recipe')
#cookiecutter(template, no_input=True,
# extra_context={'name': package, 'recipe': dst})
# Copy the recipe
#shutil.copy(join(recipe, 'meta.yaml'), join(dst, 'meta.yaml'))
#shutil.rmtree(recipe)
# Read the generated recipe
with open(join(dst, 'meta.yaml')) as f:
# Strip off the jinja tags (and add them in at the end)
data = f.read().split("\n")
var_lines = len([l for l in data if l.startswith("{%")])
# Skip version, name, etc..
meta = yaml.load("\n".join(data[var_lines:]),
Loader=yaml.RoundTripLoader)
# Update name
meta['package']['name'] = 'pip-'+meta['package']['name']
# Remove description it can cause issues
summary = meta['about'].get('summary', '')
summary += " Built for Android and iOS apps using enaml-native."
meta['about']['summary'] = summary
# Update the script to install for every arch
script = meta['build'].pop('script', '')
meta['build']['noarch'] = True
build_script = ['export CC=/bin/false', 'export CXX=/bin/false']
build_script += [
'{script} --no-compile '
'--install-base=$PREFIX/{prefix} '
'--install-lib=$PREFIX/{prefix}/python/site-packages '
'--install-scripts=$PREFIX/{prefix}/scripts '
'--install-data=$PREFIX/{prefix}/data '
'--install-headers=$PREFIX/{prefix}/include'.format(
script=script.strip(), prefix=p, **ctx) for p in [
'android/arm', 'android/arm64', 'android/x86',
'android/x86_64', 'iphoneos', 'iphonesimulator'
]
]
meta['build']['script'] = build_script
# Prefix all dependencies with 'pip-'
requires = []
excluded = ['python', 'cython', 'setuptools']
for stage in meta['requirements'].keys():
reqs = meta['requirements'].pop(stage, [])
requires.extend(reqs)
r = ['pip-{}'.format(r) for r in reqs if r not in excluded]
if r:
meta['requirements'][stage] = r
# Build all requirements
if args.recursive:
requires = list(set(requires))
for pkg in requires:
# Strip off any version
pkg = re.split("[<>=]", pkg)[0].strip()
if pkg in excluded or pkg in self._built:
continue # Not needed or already done
if args.force or not exists('pip-{}'.format(pkg)):
self.build(pkg, args)
# Remove tests we're cross compiling
meta.pop('test', None)
# Save it
with open(join(dst, 'meta.yaml'), 'w') as f:
f.write("\n".join(data[:var_lines])+"\n")
f.write(yaml.dump(meta, Dumper=yaml.RoundTripDumper,
width=1000))
# Now build it
build_args = ['--croot={}'.format(args.croot)
] if args.croot else []
# Want to force a failure on any compiling
env = os.environ.copy()
env.update({'CC': '/bin/false', 'CXX': '/bin/false'})
shprint(self.cli.conda, 'build', dst, *build_args)
print(Colors.GREEN+"[INFO] Built {} successfully!".format(
dst)+Colors.RESET)
class NdkStack(Command):
    """ Shortcut to run ndk-stack to show debugging output of a crash in a
    native library.

    Pipes `adb logcat` through the NDK's ndk-stack tool so native stack
    traces are symbolized against the locally built objects.

    See https://developer.android.com/ndk/guides/ndk-stack.html
    """
    title = set_default("ndk-stack")
    help = set_default("Run ndk-stack on the adb output")
    args = set_default([
        ('arch', dict(nargs='?', default="armeabi-v7a")),
        ('args', dict(nargs=REMAINDER, help="Extra args for ndk-stack")),
    ])

    def run(self, args=None):
        ctx = self.ctx
        env = ctx['android']
        # ndk path comes from the app's android env config (may use ~).
        ndk_stack = sh.Command(join(
            os.path.expanduser(env['ndk']),
            'ndk-stack.cmd' if IS_WIN else 'ndk-stack'
        ))
        arch = args.arch if args else 'armeabi-v7a'
        # Symbol dir containing the unstripped .so objects for this arch.
        sym = 'venv/android/enaml-native/src/main/obj/local/{}'.format(arch)
        shprint(ndk_stack, sh.adb('logcat', _piped=True), '-sym', sym)
class NdkBuild(Command):
""" Run ndk-build on enaml-native and any packages
that define an `enaml_native_ndk_build` entry_point.
"""
title = set_default("ndk-build")
help = set_default("Run ndk-build on the android project")
def run(self, args=None):
ctx = self.ctx
env = ctx['android']
# Lib version
build_ver = sys.version_info.major
for line in self.cli.conda('list').split("\n"):
print(line)
if 'android-python' in line:
build_ver = 2 if 'py27' in line else 3
py_version = ".".join(line.split()[1].split(".")[:2])
if build_ver > 2:
py_version += 'm'
break
print(Colors.GREEN+"[DEBUG] Building for {}".format(
py_version)+Colors.RESET)
ndk_build = sh.Command(join(
os.path.expanduser(env['ndk']),
'ndk-build.cmd' if IS_WIN else 'ndk-build'
))
arches = [ANDROID_TARGETS[arch] for arch in env['targets']]
#: Where the jni files are
jni_dir = env.get(
'jni_dir',
"{conda_prefix}/android/enaml-native/src/main/jni".format(**env)
)
if 'jni_dir' not in env:
env['jni_dir'] = jni_dir
#: Where native libraries go for each arch
ndk_build_dir = env.get(
'ndk_build_dir',
"{conda_prefix}/android/enaml-native/src/main/libs".format(**env)
)
if 'ndk_build_dir' not in env:
env['ndk_build_dir'] = ndk_build_dir
#: Do ndk-build in the jni dir
with cd(jni_dir):
#: Patch Application.mk to have the correct ABI's
with open('Application.mk') as f:
app_mk = f.read()
#: APP_ABI := armeabi-v7a
new_mk = []
for line in app_mk.split("\n"):
if re.match(r'APP_ABI\s*:=\s*.+', line):
line = 'APP_ABI := {}'.format(" ".join(arches))
new_mk.append(line)
with open('Application.mk', 'w') as f:
f.write("\n".join(new_mk))
#: Patch Android.mk to have the correct python version
with open('Android.mk') as f:
android_mk = f.read()
#: PY_LIB_VER := 2.7
new_mk = []
for line in android_mk.split("\n"):
if re.match(r'PY_LIB_VER\s*:=\s*.+', line):
line = 'PY_LIB_VER := {}'.format(py_version)
new_mk.append(line)
with open('Android.mk', 'w') as f:
f.write("\n".join(new_mk))
#: Now run nkd-build
shprint(ndk_build)
#: Add entry point so packages can include their own jni libs
dependencies = ctx['dependencies']#.keys()
for ep in pkg_resources.iter_entry_points(
group="enaml_native_ndk_build"):
for name in dependencies:
if ep.name.replace("-", '_') == name.replace("-", '_'):
ndk_build_hook = ep.load()
print("Custom ndk_build_hook {} found for '{}'. ".format(
ndk_build_hook, name))
ndk_build_hook(self.ctx)
break
#: Now copy all compiled python modules to the jniLibs dir so android
#: includes them
for arch in arches:
cfg = dict(
arch=arch,
local_arch=ANDROID_ABIS[arch],
ndk_build_dir=ndk_build_dir,
)
cfg.update(env) # get python_build_dir from the env
#: Where .so files go
dst = abspath('{ndk_build_dir}/{arch}'.format(**cfg))
#: Collect all .so files to the lib dir
with cd('{conda_prefix}/android/'
'{local_arch}/lib/'.format(**cfg)):
for lib in glob('*.so'):
excluded = [p for p in env.get('excluded', [])
if fnmatch.fnmatch(lib, p)]
if excluded:
continue
shutil.copy(lib, dst)
class BundleAssets(Command):
""" This is used by the gradle build to pack python into a zip.
"""
title = set_default("bundle-assets")
help = set_default("Creates a python bundle of all .py and .enaml files")
args = set_default([
('target', dict(nargs='?', default="android",
help="Build for the given target (android, iphoneos, iphonesimulator)")),
('--release', dict(action='store_true', help="Create a release bundle")),
('--no-compile', dict(action='store_true', help="Don't generate python cache")),
])
def run(self, args=None):
ctx = self.ctx
if args.target not in ['android', 'iphoneos', 'iphonesimulator']:
raise ValueError("Target must be either android, iphoneos, or iphonesimulator")
if args.target == 'android':
env = ctx['android']
else:
env = ctx['ios']
#: Now copy to android assets folder
#: Extracted file type
bundle = 'python.tar.gz'
root = abspath(os.getcwd())
# Run lib build
if args.target == 'android':
#: Um, we're passing args from another command?
self.cmds['ndk-build'].run(args)
else:
#: Collect all .so files to the lib dir
with cd('{conda_prefix}/{target}/lib/'.format(target=args.target, **env)):
dst = '{root}/ios/Libs'.format(root=root)
if exists(dst):
shutil.rmtree(dst)
os.makedirs(dst)
# Copy all libs to the
for lib in glob('*.dylib'):
excluded = [p for p in env.get('excluded', [])
if fnmatch.fnmatch(lib, p)]
if excluded:
continue
shutil.copy(lib, dst)
# Clean each arch
#: Remove old
cfg = dict(bundle_id=ctx['bundle_id'])
if args.target == 'android':
for arch in env['targets']:
cfg.update(dict(
target='android/{}'.format(arch),
local_arch=arch,
arch=ANDROID_TARGETS[arch]
))
break
else:
cfg['target'] = args.target
cfg.update(env)
#: Create
if not os.path.exists(env['python_build_dir']):
os.makedirs(env['python_build_dir'].format(**cfg))
# raise RuntimeError(
# "Error: Python build doesn't exist. "
# "You should run './enaml-native build-python' first!")
with cd(env['python_build_dir']):
#: Remove old build
if os.path.exists('build'):
shutil.rmtree('build')
#: Copy python/ build/
cp('{conda_prefix}/{target}/python/'.format(**cfg),
'{python_build_dir}/build'.format(**cfg))
#: Copy sources from app source
for src in ctx.get('sources', ['src']):
cp(join(root, src), 'build')
#: Clean any excluded sources
with cd('build'):
if not args.no_compile:
# Compile to pyc
compileall.compile_dir('.')
# Remove all py files
for dp, dn, fn in os.walk('.'):
for f in glob(join(dp, '*.py')):
if exists(f+'c') or exists(f+'o'):
os.remove(f)
# Exclude all py files and any user added patterns
for pattern in env.get('excluded', [])+['*.dist-info',
'*.egg-info']:
matches = glob(pattern)
for m in matches:
if os.path.isdir(m):
shutil.rmtree(m)
else:
os.remove(m)
#: Remove old
for ext in ['.zip', '.tar.lz4', '.so', '.tar.gz']:
if exists('python.{}'.format(ext)):
os.remove('python.{}'.format(ext))
#: Zip everything and copy to assets arch to build
with cd('build'):
print(Colors.CYAN+"[DEBUG] Creating python bundle..."+ \
Colors.RESET)
with tarfile.open('../'+bundle, "w:gz") as tar:
tar.add('.')
#shprint(sh.zip, '-r',
# 'android/app/src/main/assets/python/python.zip', '.')
#shprint(sh.zip, '-r', '../python.zip', '.')
#shprint(sh.tar, '-zcvf', '../python.tar.gz', '.')
#shprint(sh.bash, '-c',
# 'tar czf - build | lz4 -9 - python.tar.lz4')
# import msgpack
# import lz4
# import lz4.frame
# with open('../libpybundle.so', 'wb') as source:
# data = {}
# for root, dirs, files in os.walk("."):
# for file in files:
# path = join(root, file)[2:] # Skip ./
#
# # TODO Compile to pyc here
# with open(path, 'rb') as f:
# data[path] = f.read()
# for k in data.keys():
# print(k)
# msgpack.pack(data, source)
# # Compress with lz4
# MINHC = lz4.frame.COMPRESSIONLEVEL_MINHC
# with lz4.frame.open('../libpybundle.lz4', 'wb',
# compression_level=MINHC) as f:
# f.write(msgpack.packb(data))
# Copy to each lib dir
#for arch in env['targets']:
# env['abi'] = ANDROID_TARGETS[arch]
# src = '{python_build_dir}/libpybundle.so'.format(**env)
# dst = '{conda_prefix}/android/enaml-native/src/main/libs/{abi}/'.format(**env)
# print("Copying bundle to {}...".format(dst))
# shutil.copy(src, dst)
# Copy to Android assets
if args.target == 'android':
cp('{python_build_dir}/{bundle}'.format(bundle=bundle, **env),
'android/app/src/main/assets/python/{bundle}'.format(bundle=bundle))
# Copy to iOS assets
else:
# TODO Use the bundle!
cp('{python_build_dir}/build'.format(bundle=bundle, **env),
'ios/assets/python'.format(bundle=bundle))
#cp('{python_build_dir}/{bundle}'.format(bundle=bundle, **env),
# 'ios/app/src/main/assets/python/{bundle}'.format(bundle=bundle))
print(Colors.GREEN+"[INFO] Python bundled successfully!"+Colors.RESET)
class ListPackages(Command):
    """ Alias for ``conda list``: show the packages installed in the
    current environment.
    """
    title = set_default("list")
    help = set_default("List installed packages (alias to conda list)")

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Shell out to conda to print the installed packages. """
        shprint(self.cli.conda, 'list')
class Install(Command):
    """ The "install" command does a `conda install` of the package names
    given and then runs the linker command.
    """
    title = set_default("install")
    help = set_default("Install and link an enaml-native package")
    args = set_default([
        ('args', dict(nargs=REMAINDER, help="Alias to conda install")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Install the given packages with conda, then link everything.

        Refuses to run in the root/base conda env — packages must go into
        the app's own environment.
        """
        if os.environ.get('CONDA_DEFAULT_ENV') in [None, 'root']:
            # Fixed: missing space ("usedwithin") in the original message
            print(Colors.RED+'enaml-native install should only be used '
                             'within an app env!'+Colors.RESET)
            # Exit non-zero so callers/scripts can detect the failure
            # (the original raised SystemExit(0), masking the error).
            raise SystemExit(1)
        shprint(self.cli.conda, 'install', '-y', *args.args)

        #: Link everything for now
        self.cmds['link'].run()
class Uninstall(Command):
    """ The "uninstall" command unlinks the package (if needed) and does a
    `conda uninstall` of the package names given.
    """
    title = set_default("uninstall")
    help = set_default("Uninstall and unlink enaml-native package")
    args = set_default([
        ('args', dict(help="Args to conda uninstall", nargs=REMAINDER)),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Unlink (when names are available) then conda-uninstall.

        Refuses to run in the root/base conda env.
        """
        if os.environ.get('CONDA_DEFAULT_ENV') in [None, 'root']:
            # Fixed: missing space ("usedwithin") in the original message
            print(Colors.RED+'enaml-native uninstall should only be used '
                             'within an app env!'+Colors.RESET)
            # Exit non-zero so callers/scripts can detect the failure
            # (the original raised SystemExit(0), masking the error).
            raise SystemExit(1)

        #: Unlink first
        if hasattr(args, 'names'):
            # TODO: uninstall args use 'args' (REMAINDER), not 'names' —
            # this branch never fires as written; kept for compatibility.
            self.cmds['unlink'].run(args)
        shprint(self.cli.conda, 'uninstall', '-y', *args.args)
class Link(Command):
    """ The "link" command modifies the android and ios projects to include
    all of the changes required for an installed package to work.

    A custom linker can be used by adding an "enaml_native_linker"
    entry point, which shall be a function that receives the app's
    package context as an argument.

    Example
    -------
    def linker(ctx):
        # Link android and ios projects here
        return True  #: Tell the cli the linking was handled

    """
    title = set_default("link")
    help = set_default("Link an enaml-native package "
                       "(updates android and ios projects)")
    args = set_default([
        ('names', dict(
            help="Package name (optional) If not set links all projects.",
            nargs='*')),
    ])

    #: Where "enaml native packages" are installed within the root
    package_dir = 'venv'

    def run(self, args=None):
        """ Link the named packages, or every package of every target
        platform when no names were given.
        """
        print("Linking {}".format(args.names if args and args.names
                                  else "all packages..."))
        if args and args.names:
            for name in args.names:
                self.link(self.package_dir, name)
        else:
            #: Link everything
            for target in ['android', 'iphoneos', 'iphonesimulator']:
                sysroot = join(self.package_dir, target)
                for path in os.listdir(sysroot):
                    self.link(sysroot, path)

    def link(self, path, pkg):
        """ Link the package in the current directory.

        A package shipping a build.gradle is linked into the android
        project; one shipping a Podfile is linked into the ios project.
        """
        # Check if a custom linker exists to handle linking this package
        #for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
        #    if ep.name.replace("-", '_') == pkg.replace("-", '_'):
        #        linker = ep.load()
        #        print("Custom linker {} found for '{}'. Linking...".format(
        #            linker, pkg))
        #        if linker(self.ctx, path):
        #            return

        #: Use the default builtin linker script
        if exists(join(path, pkg, 'build.gradle')):
            print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
                pkg)+Colors.RESET)
            self.link_android(path, pkg)
        if exists(join(path, pkg, 'Podfile')):
            print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
                pkg)+Colors.RESET)
            self.link_ios(path, pkg)

    @staticmethod
    def is_settings_linked(source, pkg):
        """ Returns true if the "include ':<project>'" line exists in the
        settings.gradle source given.
        """
        for line in source.split("\n"):
            if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
                return True
        return False

    @staticmethod
    def is_build_linked(source, pkg):
        """ Returns true if an "api project(':<project>')" (or legacy
        "compile project(...)") line exists in the build.gradle source.
        """
        for line in source.split("\n"):
            if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
                         line):
                return True
        return False

    @staticmethod
    def find_packages(path):
        """ Find all java files matching the "*Package.java" pattern within
        the given enaml package directory relative to the java source path.
        """
        matches = []
        root = join(path, 'src', 'main', 'java')
        for folder, dirnames, filenames in os.walk(root):
            for filename in fnmatch.filter(filenames, '*Package.java'):
                #: Open and make sure it's an EnamlPackage somewhere
                with open(join(folder, filename)) as f:
                    if "implements EnamlPackage" in f.read():
                        package = os.path.relpath(folder, root)
                        matches.append(os.path.join(package, filename))
        return matches

    @staticmethod
    def is_app_linked(source, pkg, java_package):
        """ Returns true if the java class reference exists in the
        MainApplication.java source given.
        """
        for line in source.split("\n"):
            if java_package in line:
                return True
        return False

    def link_android(self, path, pkg):
        """ Link the android project to this library.

        1. Includes this project's directory in the app's
           android/settings.gradle.
        2. Adds this project as a dependency to android/app/build.gradle.
        3. If present, adds the import and package registration to
           android/app/src/main/java/<bundle/id>/MainApplication.java.

        On any failure every touched file is restored to its original
        content and the error is re-raised.
        """
        bundle_id = self.ctx['bundle_id']

        #: Read the current files so they can be restored on failure
        with open(join('android', 'settings.gradle')) as f:
            settings_gradle = f.read()
        with open(join('android', 'app', 'build.gradle')) as f:
            build_gradle = f.read()

        #: Find the MainApplication.java
        main_app_java_path = join('android', 'app', 'src', 'main', 'java',
                                  join(*bundle_id.split(".")),
                                  'MainApplication.java')
        with open(main_app_java_path) as f:
            main_application_java = f.read()
        try:
            #: Now link all the EnamlPackages we can find in the new "package"
            new_packages = Link.find_packages(join(path, pkg))
            if not new_packages:
                print("[Android] {} No EnamlPackages found to link!".format(
                    pkg))
                return

            #: Link settings.gradle
            if not Link.is_settings_linked(settings_gradle, pkg):
                #: Add two statements
                new_settings = settings_gradle.split("\n")
                new_settings.append("")  # Blank line
                new_settings.append("include ':{name}'".format(name=pkg))
                new_settings.append("project(':{name}').projectDir = "
                                    "new File(rootProject.projectDir, "
                                    "'../{path}/android/{name}')"
                                    .format(name=pkg, path=self.package_dir))
                with open(join('android', 'settings.gradle'), 'w') as f:
                    f.write("\n".join(new_settings))
                print("[Android] {} linked in settings.gradle!".format(pkg))
            else:
                print("[Android] {} was already linked in "
                      "settings.gradle!".format(pkg))

            #: Link app/build.gradle
            if not Link.is_build_linked(build_gradle, pkg):
                new_build = build_gradle.split("\n")

                #: Find correct line number
                found = False
                for i, line in enumerate(new_build):
                    if re.match(r"dependencies\s*{", line):
                        found = True
                        continue
                    if found and "}" in line:
                        #: Hackish way to find line of the closing bracket
                        #: after the dependencies { block is found
                        break
                if not found:
                    raise ValueError("Unable to find dependencies in "
                                     "{pkg}/app/build.gradle!".format(pkg=pkg))

                #: Insert before the closing bracket
                new_build.insert(i, " api project(':{name}')".format(
                    name=pkg))
                with open(join('android', 'app', 'build.gradle'), 'w') as f:
                    f.write("\n".join(new_build))
                print("[Android] {} linked in app/build.gradle!".format(pkg))
            else:
                print("[Android] {} was already linked in "
                      "app/build.gradle!".format(pkg))

            new_app_java = []
            for package in new_packages:
                #: Add our import statement
                javacls = os.path.splitext(package)[0].replace("/", ".")
                if not Link.is_app_linked(main_application_java, pkg, javacls):
                    #: Reuse previous if available
                    new_app_java = (new_app_java or
                                    main_application_java.split("\n"))

                    #: Find last import statement
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line, "import *;"):
                            j = i
                    new_app_java.insert(j+1, "import {};".format(javacls))

                    #: Add the package statement
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line.strip(), "new *Package()"):
                            j = i
                    if j == 0:
                        raise ValueError("Could not find the correct spot to "
                                         "add package {}".format(javacls))
                    else:
                        #: Get indent and add to previous line
                        #: Add comma to previous line
                        new_app_java[j] = new_app_java[j] + ","
                        #: Insert new line
                        new_app_java.insert(j+1, " new {}()"
                                            .format(javacls.split(".")[-1]))
                else:
                    print("[Android] {} was already linked in {}!".format(
                        pkg, main_app_java_path))
            if new_app_java:
                with open(main_app_java_path, 'w') as f:
                    f.write("\n".join(new_app_java))
            print(Colors.GREEN+"[Android] {} linked successfully!".format(
                pkg)+Colors.RESET)
        except Exception as e:
            # Fixed: the failure message was printed in GREEN; use RED
            # for consistency with Unlink's error handling.
            print(Colors.RED+"[Android] {} Failed to link. "
                             "Reverting due to error: "
                             "{}".format(pkg, e)+Colors.RESET)

            #: Undo any changes
            with open(join('android', 'settings.gradle'), 'w') as f:
                f.write(settings_gradle)
            with open(join('android', 'app', 'build.gradle'), 'w') as f:
                f.write(build_gradle)
            with open(main_app_java_path, 'w') as f:
                f.write(main_application_java)

            #: Now blow up
            raise

    def link_ios(self, path, pkg):
        """ Linking of iOS projects is not yet implemented. """
        print("[iOS] Link TODO:...")
class Unlink(Command):
    """ The "unlink" command tries to undo the modifications done by the
    linker.

    A custom unlinker can be used by adding an "enaml_native_unlinker"
    entry point which shall be a function that receives the app
    package context as an argument.

    Example
    -------
    def unlinker(ctx):
        # Unlink android and ios projects here
        return True  #: Tell the cli the unlinking was handled

    """
    title = set_default("unlink")
    help = set_default("Unlink an enaml-native package")
    args = set_default([
        ('names', dict(help="Package name", nargs="+")),
    ])

    def run(self, args=None):
        """ The name IS required here. """
        print(Colors.BLUE+"[INFO] Unlinking {}...".format(
            args.names)+Colors.RESET)
        for name in args.names:
            self.unlink(Link.package_dir, name)

    def unlink(self, path, pkg):
        """ Unlink the package in the current directory. """
        #: Check if a custom unlinker exists to handle unlinking this package
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_unlinker"):
            if ep.name.replace("-", '_') == pkg.replace("-", '_'):
                unlinker = ep.load()
                print("Custom unlinker {} found for '{}'. "
                      "Unlinking...".format(unlinker, pkg))
                if unlinker(self.ctx, path):
                    return

        if exists(join(path, 'android', pkg, 'build.gradle')):
            print("[Android] unlinking {}".format(pkg))
            self.unlink_android(path, pkg)
        for target in ['iphoneos', 'iphonesimulator']:
            if exists(join(path, target, pkg, 'Podfile')):
                print("[iOS] unlinking {}".format(pkg))
                self.unlink_ios(path, pkg)

    def unlink_android(self, path, pkg):
        """ Unlink the android project from this library.

        1. Removes the include/projectDir lines from android/settings.gradle
           (if they exist).
        2. Removes the "api/compile project(':<name>')" dependency from
           android/app/build.gradle (if present).
        3. Removes the import and package registration from
           android/app/src/main/java/<bundle/id>/MainApplication.java,
           fixing up the trailing comma of the previous entry when needed.

        On any failure every touched file is restored to its original
        content and the error is re-raised.
        """
        bundle_id = self.ctx['bundle_id']

        #: Read the current files so they can be restored on failure
        with open(join('android', 'settings.gradle')) as f:
            settings_gradle = f.read()
        with open(join('android', 'app', 'build.gradle')) as f:
            build_gradle = f.read()

        #: Find the MainApplication.java
        main_app_java_path = join('android', 'app', 'src', 'main', 'java',
                                  join(*bundle_id.split(".")),
                                  'MainApplication.java')
        with open(main_app_java_path) as f:
            main_application_java = f.read()
        try:
            #: Now unlink all the EnamlPackages we can find in the "package"
            new_packages = Link.find_packages(join(path, 'android', pkg))
            if not new_packages:
                print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
                                 "unlink!".format(pkg)+Colors.RESET)
                return

            #: Unlink settings.gradle
            if Link.is_settings_linked(settings_gradle, pkg):
                #: Remove the two statements
                new_settings = [
                    line for line in settings_gradle.split("\n")
                    if line.strip() not in [
                        "include ':{name}'".format(name=pkg),
                        "project(':{name}').projectDir = "
                        "new File(rootProject.projectDir, "
                        "'../{path}/android/{name}')".format(path=path,
                                                             name=pkg)
                    ]
                ]
                with open(join('android', 'settings.gradle'), 'w') as f:
                    f.write("\n".join(new_settings))
                print("\t[Android] {} unlinked settings.gradle!".format(pkg))
            else:
                print("\t[Android] {} was not linked in "
                      "settings.gradle!".format(pkg))

            #: Unlink app/build.gradle
            if Link.is_build_linked(build_gradle, pkg):
                new_build = [
                    line for line in build_gradle.split("\n")
                    if line.strip() not in [
                        "compile project(':{name}')".format(name=pkg),
                        "api project(':{name}')".format(name=pkg),
                    ]
                ]
                with open(join('android', 'app', 'build.gradle'), 'w') as f:
                    f.write("\n".join(new_build))
                print("\t[Android] {} unlinked in "
                      "app/build.gradle!".format(pkg))
            else:
                print("\t[Android] {} was not linked in "
                      "app/build.gradle!".format(pkg))

            new_app_java = []
            for package in new_packages:
                #: Remove our import statement and package registration
                javacls = os.path.splitext(package)[0].replace("/", ".")
                if Link.is_app_linked(main_application_java, pkg, javacls):
                    #: Reuse previous if available
                    new_app_java = (new_app_java or
                                    main_application_java.split("\n"))
                    new_app_java = [
                        line for line in new_app_java
                        if line.strip() not in [
                            "import {};".format(javacls),
                            "new {}()".format(javacls.split(".")[-1]),
                            "new {}(),".format(javacls.split(".")[-1]),
                        ]
                    ]

                    #: Now find the last package and remove the comma if it
                    #: exists
                    found = False
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line.strip(), "new *Package()"):
                            found = True
                        elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
                            j = i

                    #: We removed the last package so strip the now-trailing
                    #: comma of the previous entry
                    if not found:
                        #: This kills any whitespace...
                        new_app_java[j] = new_app_java[j][
                            :new_app_java[j].rfind(',')]
                else:
                    print("\t[Android] {} was not linked in {}!".format(
                        pkg, main_app_java_path))
            if new_app_java:
                with open(main_app_java_path, 'w') as f:
                    f.write("\n".join(new_app_java))
            print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
                pkg)+Colors.RESET)
        except Exception as e:
            print(Colors.RED+"\t[Android] {} Failed to unlink. "
                             "Reverting due to error: {}".format(
                                 pkg, e)+Colors.RESET)

            #: Undo any changes
            with open(join('android', 'settings.gradle'), 'w') as f:
                f.write(settings_gradle)
            with open(join('android', 'app', 'build.gradle'), 'w') as f:
                f.write(build_gradle)
            with open(main_app_java_path, 'w') as f:
                f.write(main_application_java)

            #: Now blow up
            raise

    def unlink_ios(self, path, pkg):
        """ Unlinking of iOS projects is not yet implemented.

        This method was previously missing entirely, so unlinking any
        package that shipped a Podfile crashed with an AttributeError
        from ``self.unlink_ios`` in :meth:`unlink`.
        """
        print("[iOS] Unlink TODO:...")
class BuildAndroid(Command):
    """ Builds the android project by invoking the gradle wrapper. """
    title = set_default("build-android")
    help = set_default("Build android project")
    args = set_default([
        ('--release', dict(action='store_true', help="Release mode")),
        ('extra', dict(nargs=REMAINDER, help="Args to pass to gradle")),
    ])

    def run(self, args=None):
        """ Assemble the release or debug APK from the android folder. """
        with cd("android"):
            wrapper = sh.Command('gradlew.bat' if IS_WIN else './gradlew')
            task = ('assembleRelease' if args and args.release
                    else 'assembleDebug')
            shprint(wrapper, task, *args.extra, _debug=True)
class CleanAndroid(Command):
    """ Runs the gradle ``clean`` task on the android project. """
    title = set_default("clean-android")
    help = set_default("Clean the android project")

    def run(self, args=None):
        """ Invoke the gradle wrapper's clean task. """
        with cd('android'):
            wrapper = sh.Command('gradlew.bat' if IS_WIN else './gradlew')
            shprint(wrapper, 'clean', _debug=True)
class RunAndroid(Command):
    """ Build the android app, install it on a device (starting an emulator
    if none is connected), and launch its MainActivity via adb.
    """
    title = set_default("run-android")
    help = set_default("Build android project, install it, and run")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
        ('extra', dict(nargs=REMAINDER, help="Extra args to pass to gradle")),
    ])

    def run(self, args=None):
        """ Assemble, install and start the app.

        In release mode the unsigned release APK is installed with adb;
        in debug mode gradle's installDebug task handles installation.
        """
        ctx = self.ctx
        bundle_id = ctx['bundle_id']
        with cd("android"):
            #: Path of the unsigned release APK produced by assembleRelease
            release_apk = os.path.abspath(join(
                '.', 'app', 'build', 'outputs', 'apk',
                'app-release-unsigned.apk'))
            gradlew = sh.Command('gradlew.bat' if IS_WIN else './gradlew')

            #: If no devices are connected, start the simulator
            #: NOTE(review): assumes near-empty `adb devices` output means
            #: no devices — confirm the length-1 heuristic against the
            #: actual adb output format
            if len(sh.adb('devices').stdout.strip())==1:
                #: Launch the first configured AVD (blocks in the emulator)
                device = sh.emulator('-list-avds').stdout.split("\n")[0]
                shprint(sh.emulator, '-avd', device)
            if args and args.release:
                shprint(gradlew, 'assembleRelease', *args.extra, _debug=True)
                #shprint(sh.adb,'uninstall','-k','"{}"'.format(bundle_id))
                shprint(sh.adb, 'install', release_apk)
            else:
                #: installDebug builds and installs in one step
                shprint(gradlew, 'installDebug', *args.extra, _debug=True)
            #: Start the app's main activity on the device
            shprint(sh.adb, 'shell', 'am', 'start', '-n',
                    '{bundle_id}/{bundle_id}.MainActivity'.format(
                        bundle_id=bundle_id))
class CleanIOS(Command):
    """ Runs ``xcodebuild clean`` on the ios project. """
    title = set_default("clean-ios")
    help = set_default("Clean the ios project")

    def run(self, args=None):
        """ Clean every target of the App xcode project. """
        with cd('ios'):
            shprint(
                sh.xcodebuild, 'clean', '-project', 'App.xcodeproj',
                '-configuration', 'ReleaseAdhoc', '-alltargets')
class RunIOS(Command):
    """ Build the ios app with xcodebuild and launch it on the currently
    booted simulator.
    """
    title = set_default("run-ios")
    help = set_default("Build and run the ios project")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
    ])

    def run(self, args=None):
        """ Build the CocoaPods workspace's scheme and launch the app
        (by bundle id) on the booted simulator.

        Raises
        ------
        RuntimeError
            If no .xcworkspace exists (i.e. `pod install` was never run).
        """
        ctx = self.ctx
        # Removed unused `env = ctx['ios']` which also raised a spurious
        # KeyError when the 'ios' section was missing from the config.
        with cd('ios'):
            ws = glob("*.xcworkspace")
            if not ws:
                raise RuntimeError(
                    "Couldn't find a xcworkspace in the ios folder! "
                    "Did you run `pod install`? ")
            workspace = ws[0]
            #: The scheme is the workspace filename without its extension
            scheme = '.'.join(workspace.split('.')[0:-1])
            shprint(sh.xcrun, 'xcodebuild',
                    '-scheme', scheme,
                    '-workspace', workspace,
                    '-configuration',
                    'Release' if args and args.release else 'Debug',
                    '-allowProvisioningUpdates',
                    '-derivedDataPath', 'run')
            #shprint(sh.xcrun, 'simctl', 'install', 'booted',
            #        'build/Build/Products/Debug-iphonesimulator/'
            #        '{project}.app'.format(**env))
            shprint(sh.xcrun, 'simctl', 'launch', 'booted', ctx['bundle_id'])
class BuildIOS(Command):
    """ Build the ios app with xcodebuild via the CocoaPods workspace. """
    title = set_default("build-ios")
    help = set_default("Build the ios project")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
    ])

    def run(self, args=None):
        """ Build the workspace's scheme into the local build directory.

        Raises
        ------
        RuntimeError
            If no .xcworkspace exists (i.e. `pod install` was never run).
        """
        # Removed unused `ctx = self.ctx` local.
        with cd('ios'):
            ws = glob("*.xcworkspace")
            if not ws:
                raise RuntimeError(
                    "Couldn't find a xcworkspace in the ios folder! "
                    "Did you run `pod install`? ")
            workspace = ws[0]
            #: The scheme is the workspace filename without its extension
            scheme = '.'.join(workspace.split('.')[0:-1])
            shprint(sh.xcrun,
                    'xcodebuild',
                    '-scheme', scheme,
                    '-workspace', workspace,
                    '-configuration',
                    'Release' if args and args.release else 'Debug',
                    '-allowProvisioningUpdates',
                    '-derivedDataPath', 'build')
class Server(Command):
    """ Run a dev server to host files. Only view files can be reloaded at the
    moment.

    In normal mode it watches the app's src directory (via watchdog) and
    pushes changed .py/.enaml files to connected apps over a websocket.
    In remote debugging mode it acts as a message-forwarding bridge
    between two connected clients.
    """
    title = set_default("start")
    help = set_default("Start a debug server for serving files to the app")

    #: Dev server index page to render
    index_page = Unicode("enaml-native dev server. "
                         "When you change a source file it pushes to the app.")

    args = set_default([
        ('--remote-debugging', dict(action='store_true',
                                    help="Run in remote debugging mode")),
    ])

    #: Server port
    port = Int(8888)

    #: Delay (in seconds — passed straight to call_later) used to debounce
    #: file-change events before triggering a reload
    reload_delay = Float(1)

    _reload_count = Int()  #: Pending reload requests

    #: Watchdog observer
    observer = Instance(object)

    #: Watchdog handler
    watcher = Instance(object)

    #: Websocket handler implementation
    handlers = List()

    #: Callable to add a callback from a thread into the event loop
    add_callback = Callable()

    #: Callable to add a callback at some later time
    call_later = Callable()

    #: Changed file events
    changes = List()

    #: Run in bridge (forwarding) mode for remote debugging
    remote_debugging = Bool()

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args=None):
        """ Start the dev server using tornado or twisted (whichever the
        app's dependencies declare) and, unless remote debugging, watch
        the src directory for changes.
        """
        ctx = self.ctx

        #: Look for tornado or twisted in reqs
        use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))

        #: Save setting
        self.remote_debugging = args and args.remote_debugging

        if self.remote_debugging:
            #: Do reverse forwarding so you can use remote-debugging over
            #: adb (via USB even if Wifi is not accessible)
            shprint(sh.adb, 'reverse',
                    'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
        else:
            #: Setup observer
            try:
                from watchdog.observers import Observer
                from watchdog.events import LoggingEventHandler
            except ImportError:
                print(Colors.RED + "[WARNING] Watchdog is required the dev "
                      "server: Run 'pip install watchdog'" + Colors.RESET)
                return
            self.observer = Observer()
            server = self

            class AppNotifier(LoggingEventHandler):
                #: Forwards watchdog events (fired on a worker thread)
                #: into the event loop thread via server.add_callback
                def on_any_event(self, event):
                    super(AppNotifier, self).on_any_event(event)

                    #: Use add callback to push to event loop thread
                    server.add_callback(server.on_file_changed, event)

        with cd('src'):
            if not self.remote_debugging:
                print("Watching {}".format(abspath('.')))
                self.watcher = AppNotifier()
                self.observer.schedule(self.watcher, abspath('.'),
                                       recursive=True)
                self.observer.start()
            if use_twisted:
                self.run_twisted(args)
            else:
                self.run_tornado(args)

    def run_tornado(self, args):
        """ Tornado dev server implementation """
        server = self
        import tornado.ioloop
        import tornado.web
        import tornado.websocket
        ioloop = tornado.ioloop.IOLoop.current()

        class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
            #: Delegates the websocket lifecycle to the Server instance
            def open(self):
                super(DevWebSocketHandler, self).open()
                server.on_open(self)

            def on_message(self, message):
                server.on_message(self, message)

            def on_close(self):
                super(DevWebSocketHandler, self).on_close()
                server.on_close(self)

        class MainHandler(tornado.web.RequestHandler):
            #: Serves the plain-text index page
            def get(self):
                self.write(server.index_page)

        #: Set the call later method
        server.call_later = ioloop.call_later
        server.add_callback = ioloop.add_callback

        app = tornado.web.Application([
            (r"/", MainHandler),
            (r"/dev", DevWebSocketHandler),
        ])
        app.listen(self.port)
        print("Tornado Dev server started on {}".format(self.port))
        ioloop.start()

    def run_twisted(self, args):
        """ Twisted dev server implementation """
        server = self
        from twisted.internet import reactor
        from twisted.web import resource
        from twisted.web.static import File
        from twisted.web.server import Site
        from autobahn.twisted.websocket import (WebSocketServerFactory,
                                                WebSocketServerProtocol)
        from autobahn.twisted.resource import WebSocketResource

        class DevWebSocketHandler(WebSocketServerProtocol):
            #: Delegates the websocket lifecycle to the Server instance
            def onConnect(self, request):
                super(DevWebSocketHandler, self).onConnect(request)
                server.on_open(self)

            def onMessage(self, payload, isBinary):
                server.on_message(self, payload)

            def onClose(self, wasClean, code, reason):
                super(DevWebSocketHandler, self).onClose(wasClean, code,
                                                         reason)
                server.on_close(self)

            #: Adapt to tornado's write_message API used by send_message
            def write_message(self, message, binary=False):
                self.sendMessage(message, binary)

        #: Set the call later method
        server.call_later = reactor.callLater
        server.add_callback = reactor.callFromThread

        factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
        factory.protocol = DevWebSocketHandler

        class MainHandler(resource.Resource):
            #: Serves the plain-text index page
            def render_GET(self, req):
                return str(server.index_page)

        root = resource.Resource()
        root.putChild("", MainHandler())
        root.putChild("dev", WebSocketResource(factory))
        reactor.listenTCP(self.port, Site(root))
        print("Twisted Dev server started on {}".format(self.port))
        reactor.run()

    #: ========================================================
    #: Shared protocol implementation
    #: ========================================================
    def on_open(self, handler):
        """ Register a newly connected websocket client. """
        self._reload_count = 0
        print("Client {} connected!".format(handler))
        self.handlers.append(handler)

    def on_message(self, handler, msg):
        """ In remote debugging mode this simply acts as a forwarding
        proxy for the two clients.
        """
        if self.remote_debugging:
            #: Forward to other clients
            for h in self.handlers:
                if h != handler:
                    h.write_message(msg, True)
        else:
            print(msg)

    def send_message(self, msg):
        """ Send a message to the client. This should not be used in
        remote debugging mode.
        """
        if not self.handlers:
            return  #: Client not connected
        for h in self.handlers:
            h.write_message(msg)

    def on_close(self, handler):
        """ Unregister a disconnected websocket client. """
        print("Client {} left!".format(handler))
        self.handlers.remove(handler)

    def on_file_changed(self, event):
        """ Queue a filesystem change event and schedule a (debounced)
        reload after reload_delay seconds.
        """
        print(event)
        self._reload_count += 1
        self.changes.append(event)
        self.call_later(self.reload_delay, self._trigger_reload, event)

    def _trigger_reload(self, event):
        """ Debounced reload: fires only when the last pending change's
        delay elapsed, then pushes all changed .py/.enaml files to the
        connected clients in one message.
        """
        self._reload_count -= 1
        if self._reload_count == 0:
            files = {}
            for event in self.changes:
                path = os.path.relpath(event.src_path, os.getcwd())
                #: Only source files are pushed to the app
                if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
                    continue
                with open(event.src_path) as f:
                    data = f.read()

                #: Add to changed files
                files[path] = data
            if files:
                #: Send the reload request
                msg = {
                    'type': 'reload',
                    'files': files
                }
                print("Reloading: {}".format(files.keys()))
                self.send_message(json.dumps(msg))

            #: Clear changes
            self.changes = []
def find_commands(cls):
    """ Recursively collect every (direct and indirect) subclass of *cls*.

    Used to discover all Command implementations defined in this module.
    """
    found = []
    for sub in cls.__subclasses__():
        found.append(sub)
        found += find_commands(sub)
    return found
class EnamlNativeCli(Atom):
    """ The enaml-native command line tool.

    Loads the app's environment.yml (when run inside an app directory),
    builds an argument parser from every known Command subclass, and
    dispatches to the selected command.
    """
    #: Root parser
    parser = Instance(ArgumentParser)

    #: Loaded from package
    ctx = Dict()

    #: Parsed args
    args = Instance(Namespace)

    #: Location of package file
    package = Unicode("environment.yml")

    #: If enaml-native is being run within an app directory
    in_app_directory = Bool()

    #: Conda command
    conda = Instance(sh.Command)

    #: Commands
    commands = List(Command)

    def _default_commands(self):
        """ Build the list of CLI commands by finding subclasses of the Command
        class

        Also allows commands to be installed using the "enaml_native_command"
        entry point. This entry point should return a Command subclass
        """
        commands = [c() for c in find_commands(Command)]

        #: Get commands installed via entry points
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_command"):
            c = ep.load()
            if not issubclass(c, Command):
                print("Warning: entry point {} did not return a valid enaml "
                      "cli command! This command will be ignored!".format(
                          ep.name))
            # NOTE(review): an invalid command is still appended below —
            # this looks like a missing `continue` after the warning
            commands.append(c())
        return commands

    def _default_in_app_directory(self):
        """ Return if we are in a directory that contains the package.json file
        which should indicate it's in the root directory of an enaml-native
        app.
        """
        return exists(self.package)

    def _default_ctx(self):
        """ Return the package config or context and normalize some of the
        values
        """
        if not self.in_app_directory:
            print("Warning: {} does not exist. Using the default.".format(
                self.package))
            ctx = {}
        else:
            #: RoundTripLoader preserves comments/order on later re-writes
            with open(self.package) as f:
                ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
        if self.in_app_directory:
            # Update the env for each platform
            excluded = list(ctx.get('excluded', []))
            for env in [ctx['ios'], ctx['android']]:
                if 'python_build_dir' not in env:
                    env['python_build_dir'] = expanduser(
                        abspath('build/python'))
                if 'conda_prefix' not in env:
                    env['conda_prefix'] = os.environ.get(
                        'CONDA_PREFIX', expanduser(abspath('venv')))
                # Join the shared and local exclusions
                env['excluded'] = list(env.get('excluded', [])) + excluded
        return ctx

    def _default_parser(self):
        """ Generate a parser using the command list """
        parser = ArgumentParser(prog='enaml-native')

        #: Build commands by name
        cmds = {c.title: c for c in self.commands}

        #: Build parser, prepare commands
        subparsers = parser.add_subparsers()
        for c in self.commands:
            p = subparsers.add_parser(c.title, help=c.help)
            c.parser = p
            for (flags, kwargs) in c.args:
                p.add_argument(*flags.split(), **kwargs)
            #: Stash the command on the namespace so start() can dispatch
            p.set_defaults(cmd=c)
            c.ctx = self.ctx
            c.cmds = cmds
            c.cli = self
        return parser

    def _default_conda(self):
        #: Lazily resolve the conda executable
        return find_conda()

    def check_dependencies(self):
        """ Verify conda is runnable before doing anything else. """
        try:
            self.conda('--version')
        # NOTE(review): bare except — also swallows KeyboardInterrupt etc.
        except:
            raise EnvironmentError(
                "conda could not be found. Please install miniconda from "
                "https://conda.io/miniconda.html or set CONDA_HOME to the"
                "location where conda is installed.")

    def start(self):
        """ Run the commands"""
        self.check_dependencies()
        self.args = self.parser.parse_args()

        # Python 3 doesn't set the cmd if no args are given
        if not hasattr(self.args, 'cmd'):
            self.parser.print_help()
            return
        cmd = self.args.cmd
        try:
            #: Guard commands that only make sense inside an app directory
            if cmd.app_dir_required and not self.in_app_directory:
                raise EnvironmentError(
                    "'enaml-native {}' must be run within an app root "
                    "directory not: {}".format(cmd.title, os.getcwd()))
            cmd.run(self.args)
        except sh.ErrorReturnCode as e:
            #: Let subprocess failures propagate with their original trace
            raise
def main():
    """ Console-script entry point: build and start the CLI. """
    EnamlNativeCli().start()


if __name__ == '__main__':
    main()
def cp(src, dst):
    """ Copy *src* to *dst* (like ``cp -R src dst``).

    A single file is copied with shutil (creating the destination's
    parent directory if needed); a directory is copied recursively with
    distutils' copy_tree.

    (Reconstructed: the original lines were garbled by extraction, with
    dataset metadata fused into the def line and tail.)
    """
    print("[DEBUG]: -> copying {} to {}".format(src, dst))
    if os.path.isfile(src):
        if not exists(dirname(dst)):
            os.makedirs(dirname(dst))
        shutil.copy(src, dst)
    else:
        copy_tree(src, dst)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the GPLv3 License.
The full license is in the file COPYING.txt, distributed with this software.
Created on July 10, 2017
@author: jrm
"""
import os
import re
import sys
import json
import shutil
import tarfile
import fnmatch
import compileall
import pkg_resources
from glob import glob
from os.path import join, exists, abspath, expanduser, realpath, dirname
from argparse import ArgumentParser, Namespace, REMAINDER
from atom.api import (Atom, Bool, Callable, Dict, List, Unicode, Float, Int,
Instance, set_default)
from contextlib import contextmanager
from cookiecutter.main import cookiecutter
from cookiecutter.log import configure_logger
from distutils.dir_util import copy_tree
try:
# Try conda's version
import ruamel_yaml as yaml
except:
from ruamel import yaml
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
#: True on Windows (win32) but not on macOS (whose platform is 'darwin')
IS_WIN = 'win' in sys.platform and not 'darwin' == sys.platform

# sh does not work on windows
if IS_WIN:
    import pbs

    class Sh(object):
        """ Thin adapter that makes the ``pbs`` module look like ``sh``:
        attribute access falls back to building a pbs Command.
        """
        def __getattr__(self, attr):
            if hasattr(pbs, attr):
                return getattr(pbs, attr)
            return pbs.Command(attr)

    sh = Sh()

    #: Locate the android SDK tools installed by Android Studio
    ANDROID_SDK = join(os.environ.get('LOCALAPPDATA', ''), 'Android', 'Sdk')
    adb = join(ANDROID_SDK, 'platform-tools', 'adb.exe')
    emulator = join(ANDROID_SDK, 'emulator', 'emulator.exe')
    # NOTE(review): raising at import time makes the whole CLI unusable
    # on Windows without Android Studio — confirm this is intended
    if exists(adb):
        sh.adb = sh.Command(adb)
    else:
        raise EnvironmentError("Couldn't find a adb in your System, "
                               "Make sure android studio is installed")
    if exists(emulator):
        sh.emulator = sh.Command(emulator)
    else:
        raise EnvironmentError("Couldn't find a emulator in your System, "
                               "Make sure android studio is installed")
else:
    import sh
def find_conda():
    """ Try to find the conda executable on the system.

    Searches the common miniconda install locations for Windows and
    Linux (plus CONDA_HOME if set) and every directory on the PATH
    environment variable. Falls back to `sh.conda` so resolution is
    left to the system.

    Returns
    -------
    sh.Command
        A command object wrapping the conda executable.
    """
    USER_HOME = os.path.expanduser('~')
    CONDA_HOME = os.environ.get('CONDA_HOME', '')
    PROGRAMDATA = os.environ.get('PROGRAMDATA', '')
    # Search common install paths and the PATH env var.
    # Fixed: PATH was previously split with `";" if 'win' in sys.path
    # else ":"` -- sys.path is the module search *list*, so the test was
    # effectively always False and Windows PATHs were split on ':'.
    # os.pathsep is the correct per-platform separator.
    search_paths = [
        # Windows
        join(PROGRAMDATA, 'miniconda2', 'scripts'),
        join(PROGRAMDATA, 'miniconda3', 'scripts'),
        join(USER_HOME, 'miniconda2', 'scripts'),
        join(USER_HOME, 'miniconda3', 'scripts'),
        join(CONDA_HOME, 'scripts'),
        # Linux
        join(USER_HOME, 'miniconda2', 'bin'),
        join(USER_HOME, 'miniconda3', 'bin'),
        join(CONDA_HOME, 'bin'),
        # TODO: OSX
    ] + os.environ.get("PATH", "").split(os.pathsep)
    cmd = 'conda.exe' if IS_WIN else 'conda'
    for conda_path in search_paths:
        conda = join(conda_path, cmd)
        if exists(conda):
            return sh.Command(conda)
    # Try to let the system find it
    return sh.conda
class Colors:
    """ ANSI escape sequences used to colorize terminal output. """
    # Bright/bold foreground colors
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    # Normal-intensity green (success messages)
    GREEN = "\033[0;32m"
    # Text attributes
    BOLD = "\033[;1m"
    REVERSE = "\033[;7m"
    # Restore the terminal's default style
    RESET = "\033[0;0m"
@contextmanager
def cd(newdir):
    """ Context manager that temporarily changes the working directory.

    Expands '~' in *newdir*, chdirs into it for the duration of the
    with-block, and always restores the previous working directory on
    exit -- even when the block raises.
    """
    original = os.getcwd()
    print("[DEBUG]: -> running cd {}".format(newdir))
    target = os.path.expanduser(newdir)
    os.chdir(target)
    try:
        yield
    finally:
        print("[DEBUG]: -> running cd {}".format(original))
        os.chdir(original)
def shprint(cmd, *args, **kwargs):
    """ Run a sh/pbs command and stream its output to stdout.

    Parameters
    ----------
    cmd: sh.Command
        The command to invoke.
    args:
        Positional arguments forwarded to the command.
    kwargs:
        Keyword arguments forwarded to the command. The special key
        ``_debug`` (popped, default True) selects verbatim echoing;
        when False each completed line is rewritten in place as a
        '[DEBUG]' status line (red if it contains 'error').
    """
    debug = kwargs.pop('_debug', True)
    write, flush = sys.stdout.write, sys.stdout.flush
    # Merge stderr into stdout and iterate the output unbuffered
    kwargs.update({
        '_err_to_out': True,
        '_out_bufsize': 0,
        '_iter': True
    })
    # Echo the command being run (skip piped RunningCommand args, which
    # don't render usefully as text)
    print("{}[INFO]: -> running {} {}{}".format(
        Colors.CYAN, cmd, " ".join([a for a in args if
                                    not isinstance(a, sh.RunningCommand)
                                    ]), Colors.RESET))
    if IS_WIN:
        # pbs does not support _out_bufsize/_iter; run in the background
        # and poll the process stdout one byte at a time instead
        kwargs.pop('_out_bufsize')
        kwargs.pop('_iter')
        kwargs['_bg'] = True
        process = cmd(*args, **kwargs).process
        # NOTE(review): read(1) returns bytes on Python 3, so the ''
        # sentinel and the '\r'/'\n' comparisons below only match on
        # Python 2 -- confirm which interpreter runs this path (the
        # `if not c: break` guard still terminates the loop).
        for c in iter(lambda: process.stdout.read(1), ''):
            write(c.decode('utf-8'))
            if c in ['\r', '\n']:
                flush()
            if not c:
                break
        process.wait()
        return
    buf = []
    for c in cmd(*args, **kwargs):
        if debug:
            # Verbatim pass-through of the command output
            write(c)
            if c in ['\r', '\n']:
                flush()
        else:
            if c in ['\r', '\n']:
                # End of line: rewrite it in place as a padded status
                # line, highlighted red if it looks like an error
                msg = ''.join(buf)
                color = Colors.RED if 'error' in msg else Colors.RESET
                write('{}\r[DEBUG]: {:<{w}}{}'.format(
                    color, msg, Colors.RESET, w=100))
                flush()
                buf = []
            else:
                buf.append(c)
    write("\n")
    flush()
#: Android ABI name (as gradle/ndk-build know it) -> local arch folder
#: name used under the conda prefix (e.g. android/<local_arch>/lib)
ANDROID_ABIS = {
    'armeabi-v7a': 'arm',
    'arm64-v8a': 'arm64',
    'x86': 'x86',
    'x86_64': 'x86_64',
}

#: Inverse mapping: local arch folder name -> Android ABI name
ANDROID_TARGETS = dict(zip(ANDROID_ABIS.values(), ANDROID_ABIS.keys()))
class Command(Atom):
    """ Base class for all enaml-native CLI subcommands.

    Subclasses set `title`, `help` and `args` and implement `run`; the
    argparse parser is generated from those declarations elsewhere.
    """
    # Storage for the shared instance returned by instance()
    _instance = None
    #: Subcommand name ex enaml-native <name>
    title = Unicode()
    #: Subcommand short description
    desc = Unicode()
    #: Subcommand help text
    help = Unicode()
    #: Package context used to retrieve app config and env
    ctx = Dict()
    #: Reference to other CLI commands
    cmds = Dict()
    #: Arguments this command accepts
    args = List(tuple)
    #: Parser this command uses. Generated automatically.
    parser = Instance(ArgumentParser)
    #: If the command requires running in an app dir
    app_dir_required = Bool(True)
    #: Reference to the cli
    cli = Instance(Atom)

    @classmethod
    def instance(cls):
        """ Return the shared instance of this command (or None). """
        return cls._instance

    def run(self, args):
        """ Execute the command with the parsed argparse Namespace.

        Subclasses override this; the base implementation does nothing.
        """
        pass
class Create(Command):
    """ Scaffold a new enaml-native app, lib, or package from the
    cookiecutter templates bundled with this package.
    """
    title = set_default('create')
    help = set_default("Create an enaml-native project")
    args = set_default([
        ('what', dict(help='What to create (app, lib, package)?')),
        ('--no-input', dict(action='store_true',
                            help="Use all defaults")),
        # Fixed: the two help literals previously concatenated without a
        # space ("...contents ifit already exists")
        ('-f --overwrite-if-exists', dict(action='store_true',
                                          help="Overwrite the contents if "
                                               "it already exists")),
        ('-v --verbose', dict(action='store_true', help="Verbose logging")),
    ])
    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Render the template named by ``args.what`` into the cwd.

        Templates live alongside this module under templates/<what>.
        """
        template = join(dirname(__file__), 'templates', args.what)
        configure_logger(
            stream_level='DEBUG' if args.verbose else 'INFO',
            debug_file=None,
        )
        cookiecutter(template,
                     no_input=args.no_input,
                     overwrite_if_exists=args.overwrite_if_exists)
        print(Colors.GREEN+"[INFO] {} created successfully!".format(
            args.what.title())+Colors.RESET)
class BuildRecipe(Command):
    """ Thin wrapper over ``conda build`` for building conda recipes. """
    title = set_default('build-recipe')
    help = set_default("Alias to conda build")
    args = set_default([
        ('package', dict(help='Conda recipe to build')),
        ('args', dict(nargs=REMAINDER, help="args to pass to conda build")),
    ])
    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Invoke ``conda build`` on the given recipe directory. """
        env = os.environ.copy()
        if args.package.startswith('pip-'):
            # pip- recipes are pure python: poison the compilers so any
            # accidental native compilation fails loudly
            env.update({'CC': '/bin/false', 'CXX':'/bin/false'})
        shprint(self.cli.conda, 'build', args.package, *args.args, _env=env)
        print(Colors.GREEN+"[INFO] Built {} successfully!".format(
            args.package)+Colors.RESET)
class MakePipRecipe(Command):
    """ Generate (via ``conda skeleton pypi``) and build a 'pip-' prefixed
    conda recipe for a pypi package, rewritten so it installs into the
    per-target site-packages layouts used by enaml-native apps.
    """
    title = set_default('make-pip-recipe')
    help = set_default("Creates a universal Android and iOS recipe "
                       "for a given pip package")
    args = set_default([
        ('package', dict(help='pip package to build a recipe for')),
        ('--recursive', dict(action='store_true',
                             help="recursively create for all dependencies")),
        ('--force', dict(action='store_true',
                         help="force recreation if it already exists")),
        ('--croot', dict(nargs="?", help="conda root for building recipes")),
    ])
    #: Can be run from anywhere
    app_dir_required = set_default(False)
    #: Recipes built
    _built = List()

    def run(self, args):
        """ Build a recipe for args.package (and optionally its deps). """
        self.build(args.package, args)
        print(Colors.GREEN+"[INFO] Made successfully!"+Colors.RESET)

    def build(self, package, args):
        """ Create, rewrite and conda-build the recipe for *package*.

        Runs ``conda skeleton pypi``, renames the generated recipe with a
        'pip-' prefix, rewrites meta.yaml (noarch, per-target install
        script, 'pip-' prefixed requirements), optionally recurses into
        dependencies, then runs ``conda build`` on the result.
        """
        ctx = self.ctx
        old = set(os.listdir('.'))
        # Run conda skeleton
        shprint(self.cli.conda, 'skeleton', 'pypi', package)
        # Anything new in the cwd is a freshly generated recipe dir
        new = set(os.listdir('.')).difference(old)
        self._built.append(package)
        for recipe in new:
            dst = 'pip-{}'.format(recipe)
            # Rename to add pip-prefix so it doesn't
            # conflict with regular recipes
            if args.force and exists(dst):
                shutil.rmtree(dst)
            shutil.move(recipe, dst)
            # Read the generated recipe
            with open(join(dst, 'meta.yaml')) as f:
                # Strip off the jinja tags (and add them in at the end)
                data = f.read().split("\n")
                var_lines = len([l for l in data if l.startswith("{%")])
                # Skip version, name, etc..
                meta = yaml.load("\n".join(data[var_lines:]),
                                 Loader=yaml.RoundTripLoader)
            # Update name
            meta['package']['name'] = 'pip-'+meta['package']['name']
            # Remove description it can cause issues
            summary = meta['about'].get('summary', '')
            summary += " Built for Android and iOS apps using enaml-native."
            meta['about']['summary'] = summary
            # Update the script to install for every arch
            script = meta['build'].pop('script', '')
            meta['build']['noarch'] = True
            build_script = ['export CC=/bin/false', 'export CXX=/bin/false']
            build_script += [
                '{script} --no-compile '
                '--install-base=$PREFIX/{prefix} '
                '--install-lib=$PREFIX/{prefix}/python/site-packages '
                '--install-scripts=$PREFIX/{prefix}/scripts '
                '--install-data=$PREFIX/{prefix}/data '
                '--install-headers=$PREFIX/{prefix}/include'.format(
                    script=script.strip(), prefix=p, **ctx) for p in [
                    'android/arm', 'android/arm64', 'android/x86',
                    'android/x86_64', 'iphoneos', 'iphonesimulator'
                ]
            ]
            meta['build']['script'] = build_script
            # Prefix all dependencies with 'pip-'
            requires = []
            excluded = ['python', 'cython', 'setuptools']
            for stage in meta['requirements'].keys():
                reqs = meta['requirements'].pop(stage, [])
                requires.extend(reqs)
                r = ['pip-{}'.format(r) for r in reqs if r not in excluded]
                if r:
                    meta['requirements'][stage] = r
            # Build all requirements
            if args.recursive:
                requires = list(set(requires))
                for pkg in requires:
                    # Strip off any version
                    pkg = re.split("[<>=]", pkg)[0].strip()
                    if pkg in excluded or pkg in self._built:
                        continue  # Not needed or already done
                    if args.force or not exists('pip-{}'.format(pkg)):
                        self.build(pkg, args)
            # Remove tests we're cross compiling
            meta.pop('test', None)
            # Save it
            with open(join(dst, 'meta.yaml'), 'w') as f:
                f.write("\n".join(data[:var_lines])+"\n")
                f.write(yaml.dump(meta, Dumper=yaml.RoundTripDumper,
                                  width=1000))
            # Now build it
            build_args = ['--croot={}'.format(args.croot)
                          ] if args.croot else []
            # Want to force a failure on any compiling.
            # Fixed: env was previously constructed but never passed to
            # the build, so the poisoned CC/CXX had no effect (compare
            # BuildRecipe.run which does pass _env).
            env = os.environ.copy()
            env.update({'CC': '/bin/false', 'CXX': '/bin/false'})
            shprint(self.cli.conda, 'build', dst, *build_args, _env=env)
            print(Colors.GREEN+"[INFO] Built {} successfully!".format(
                dst)+Colors.RESET)
class NdkStack(Command):
    """ Shortcut to run ndk-stack to show debugging output of a crash in a
    native library.
    See https://developer.android.com/ndk/guides/ndk-stack.html
    """
    title = set_default("ndk-stack")
    help = set_default("Run ndk-stack on the adb output")
    args = set_default([
        ('arch', dict(nargs='?', default="armeabi-v7a")),
        ('args', dict(nargs=REMAINDER, help="Extra args for ndk-stack")),
    ])

    def run(self, args=None):
        """ Pipe `adb logcat` through ndk-stack to symbolicate native
        crash traces for the selected ABI.
        """
        ctx = self.ctx
        env = ctx['android']
        # ndk-stack lives in the NDK root; Windows uses the .cmd wrapper
        ndk_stack = sh.Command(join(
            os.path.expanduser(env['ndk']),
            'ndk-stack.cmd' if IS_WIN else 'ndk-stack'
        ))
        arch = args.arch if args else 'armeabi-v7a'
        # Unstripped symbol dir produced by ndk-build for this ABI
        sym = 'venv/android/enaml-native/src/main/obj/local/{}'.format(arch)
        shprint(ndk_stack, sh.adb('logcat', _piped=True), '-sym', sym)
class NdkBuild(Command):
    """ Run ndk-build on enaml-native and any packages
    that define an `enaml_native_ndk_build` entry_point.
    """
    title = set_default("ndk-build")
    help = set_default("Run ndk-build on the android project")

    def run(self, args=None):
        """ Patch the jni makefiles for the configured targets and python
        version, run ndk-build, invoke package build hooks, then collect
        the resulting .so files per arch.
        """
        ctx = self.ctx
        env = ctx['android']
        # Lib version: detect the python major/minor from the installed
        # android-python package in `conda list` output
        build_ver = sys.version_info.major
        for line in self.cli.conda('list').split("\n"):
            print(line)
            if 'android-python' in line:
                build_ver = 2 if 'py27' in line else 3
                py_version = ".".join(line.split()[1].split(".")[:2])
                if build_ver > 2:
                    # CPython 3 soabi suffix (e.g. '3.6m')
                    py_version += 'm'
                break
        # NOTE(review): py_version is only bound inside the loop above;
        # if no 'android-python' line is found this print raises
        # NameError -- confirm android-python is always installed here.
        print(Colors.GREEN+"[DEBUG] Building for {}".format(
            py_version)+Colors.RESET)
        ndk_build = sh.Command(join(
            os.path.expanduser(env['ndk']),
            'ndk-build.cmd' if IS_WIN else 'ndk-build'
        ))
        # ABI names (e.g. 'armeabi-v7a') for the configured local targets
        arches = [ANDROID_TARGETS[arch] for arch in env['targets']]
        #: Where the jni files are
        jni_dir = env.get(
            'jni_dir',
            "{conda_prefix}/android/enaml-native/src/main/jni".format(**env)
        )
        if 'jni_dir' not in env:
            env['jni_dir'] = jni_dir
        #: Where native libraries go for each arch
        ndk_build_dir = env.get(
            'ndk_build_dir',
            "{conda_prefix}/android/enaml-native/src/main/libs".format(**env)
        )
        if 'ndk_build_dir' not in env:
            env['ndk_build_dir'] = ndk_build_dir
        #: Do ndk-build in the jni dir
        with cd(jni_dir):
            #: Patch Application.mk to have the correct ABI's
            with open('Application.mk') as f:
                app_mk = f.read()
            #: Rewrite lines like: APP_ABI := armeabi-v7a
            new_mk = []
            for line in app_mk.split("\n"):
                if re.match(r'APP_ABI\s*:=\s*.+', line):
                    line = 'APP_ABI := {}'.format(" ".join(arches))
                new_mk.append(line)
            with open('Application.mk', 'w') as f:
                f.write("\n".join(new_mk))
            #: Patch Android.mk to have the correct python version
            with open('Android.mk') as f:
                android_mk = f.read()
            #: Rewrite lines like: PY_LIB_VER := 2.7
            new_mk = []
            for line in android_mk.split("\n"):
                if re.match(r'PY_LIB_VER\s*:=\s*.+', line):
                    line = 'PY_LIB_VER := {}'.format(py_version)
                new_mk.append(line)
            with open('Android.mk', 'w') as f:
                f.write("\n".join(new_mk))
            #: Now run nkd-build
            shprint(ndk_build)
        #: Add entry point so packages can include their own jni libs
        dependencies = ctx['dependencies']#.keys()
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_ndk_build"):
            for name in dependencies:
                if ep.name.replace("-", '_') == name.replace("-", '_'):
                    ndk_build_hook = ep.load()
                    print("Custom ndk_build_hook {} found for '{}'. ".format(
                        ndk_build_hook, name))
                    ndk_build_hook(self.ctx)
                    break
        #: Now copy all compiled python modules to the jniLibs dir so android
        #: includes them
        for arch in arches:
            cfg = dict(
                arch=arch,
                local_arch=ANDROID_ABIS[arch],
                ndk_build_dir=ndk_build_dir,
            )
            cfg.update(env)  # get python_build_dir from the env
            #: Where .so files go
            dst = abspath('{ndk_build_dir}/{arch}'.format(**cfg))
            #: Collect all .so files to the lib dir
            with cd('{conda_prefix}/android/'
                    '{local_arch}/lib/'.format(**cfg)):
                for lib in glob('*.so'):
                    # Skip libs matching any user-configured exclude glob
                    excluded = [p for p in env.get('excluded', [])
                                if fnmatch.fnmatch(lib, p)]
                    if excluded:
                        continue
                    shutil.copy(lib, dst)
class BundleAssets(Command):
    """ This is used by the gradle build to pack python into a zip.

    Collects the built python for the requested target plus the app's
    own sources, byte-compiles them (unless --no-compile), strips any
    excluded patterns, packs everything into python.tar.gz, and copies
    the bundle into the android (or ios) project's assets.
    """
    title = set_default("bundle-assets")
    help = set_default("Creates a python bundle of all .py and .enaml files")
    args = set_default([
        ('target', dict(nargs='?', default="android",
                        help="Build for the given target (android, iphoneos, iphonesimulator)")),
        ('--release', dict(action='store_true', help="Create a release bundle")),
        ('--no-compile', dict(action='store_true', help="Don't generate python cache")),
    ])

    def run(self, args=None):
        """ Build and install the python asset bundle for args.target.

        Raises
        ------
        ValueError
            If the target is not android, iphoneos, or iphonesimulator.
        """
        ctx = self.ctx
        if args.target not in ['android', 'iphoneos', 'iphonesimulator']:
            raise ValueError("Target must be either android, iphoneos, "
                             "or iphonesimulator")
        if args.target == 'android':
            env = ctx['android']
        else:
            env = ctx['ios']
        #: Name of the bundle placed in the assets folder
        bundle = 'python.tar.gz'
        root = abspath(os.getcwd())
        # Run lib build
        if args.target == 'android':
            #: Um, we're passing args from another command?
            self.cmds['ndk-build'].run(args)
        else:
            #: Collect all .dylib files into the ios Libs dir
            with cd('{conda_prefix}/{target}/lib/'.format(target=args.target,
                                                          **env)):
                dst = '{root}/ios/Libs'.format(root=root)
                if exists(dst):
                    shutil.rmtree(dst)
                os.makedirs(dst)
                for lib in glob('*.dylib'):
                    # Skip libs matching any user-configured exclude glob
                    excluded = [p for p in env.get('excluded', [])
                                if fnmatch.fnmatch(lib, p)]
                    if excluded:
                        continue
                    shutil.copy(lib, dst)
        #: Format parameters for the per-target paths below
        cfg = dict(bundle_id=ctx['bundle_id'])
        if args.target == 'android':
            # Only the first configured target is used for the bundle
            for arch in env['targets']:
                cfg.update(dict(
                    target='android/{}'.format(arch),
                    local_arch=arch,
                    arch=ANDROID_TARGETS[arch]
                ))
                break
        else:
            cfg['target'] = args.target
        cfg.update(env)
        #: Create the build dir if needed.
        # Fixed: exists() previously checked the *unformatted* path while
        # makedirs() created the formatted one, so the check and the
        # creation could disagree when the path contains placeholders.
        python_build_dir = env['python_build_dir'].format(**cfg)
        if not os.path.exists(python_build_dir):
            os.makedirs(python_build_dir)
        with cd(env['python_build_dir']):
            #: Remove old build
            if os.path.exists('build'):
                shutil.rmtree('build')
            #: Copy python/ build/
            cp('{conda_prefix}/{target}/python/'.format(**cfg),
               '{python_build_dir}/build'.format(**cfg))
            #: Copy sources from app source
            for src in ctx.get('sources', ['src']):
                cp(join(root, src), 'build')
            #: Clean any excluded sources
            with cd('build'):
                if not args.no_compile:
                    # Compile to pyc
                    compileall.compile_dir('.')
                    # Remove all py files that have a compiled counterpart
                    for dp, dn, fn in os.walk('.'):
                        for f in glob(join(dp, '*.py')):
                            if exists(f+'c') or exists(f+'o'):
                                os.remove(f)
                # Exclude any user added patterns plus packaging metadata
                for pattern in env.get('excluded', [])+['*.dist-info',
                                                        '*.egg-info']:
                    matches = glob(pattern)
                    for m in matches:
                        if os.path.isdir(m):
                            shutil.rmtree(m)
                        else:
                            os.remove(m)
            #: Remove any previously built bundles.
            # Fixed: was 'python.{}'.format(ext) which, with the dotted
            # extensions below, produced names like 'python..zip' and so
            # never matched/removed the old bundles.
            for ext in ['.zip', '.tar.lz4', '.so', '.tar.gz']:
                if exists('python{}'.format(ext)):
                    os.remove('python{}'.format(ext))
            #: Zip everything and copy to assets arch to build
            with cd('build'):
                print(Colors.CYAN+"[DEBUG] Creating python bundle..."+ \
                      Colors.RESET)
                with tarfile.open('../'+bundle, "w:gz") as tar:
                    tar.add('.')
        # Copy to Android assets
        if args.target == 'android':
            cp('{python_build_dir}/{bundle}'.format(bundle=bundle, **env),
               'android/app/src/main/assets/python/{bundle}'.format(
                   bundle=bundle))
        # Copy to iOS assets
        else:
            # TODO Use the bundle!
            cp('{python_build_dir}/build'.format(bundle=bundle, **env),
               'ios/assets/python'.format(bundle=bundle))
        print(Colors.GREEN+"[INFO] Python bundled successfully!"+Colors.RESET)
class ListPackages(Command):
    """ Print the packages installed in the active conda env. """
    title = set_default("list")
    help = set_default("List installed packages (alias to conda list)")
    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Delegate directly to ``conda list``. """
        shprint(self.cli.conda, 'list')
class Install(Command):
    """ The "Install" command does a `conda install` of the package names given
    and then runs the linker command.
    """
    title = set_default("install")
    help = set_default("Install and link an enaml-native package")
    args = set_default([
        ('args', dict(nargs=REMAINDER, help="Alias to conda install")),
    ])
    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Install packages with conda, then relink the native projects.

        Aborts (exit 0) unless an app conda env is active.
        """
        if os.environ.get('CONDA_DEFAULT_ENV') in [None, 'root']:
            # Fixed: the two literals previously concatenated without a
            # space ("...only be usedwithin an app env!")
            print(Colors.RED+'enaml-native install should only be used '
                             'within an app env!'+Colors.RESET)
            raise SystemExit(0)
        shprint(self.cli.conda, 'install', '-y', *args.args)
        #: Link everything for now
        self.cmds['link'].run()
class Uninstall(Command):
    """ The "Uninstall" command unlinks the package (if needed) and does a
    `conda uninstall` of the package names given.
    """
    title = set_default("uninstall")
    help = set_default("Uninstall and unlink enaml-native package")
    args = set_default([
        ('args', dict(help="Args to conda uninstall", nargs=REMAINDER)),
    ])
    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Unlink (if package names are given) then conda uninstall.

        Aborts (exit 0) unless an app conda env is active.
        """
        if os.environ.get('CONDA_DEFAULT_ENV') in [None, 'root']:
            # Fixed: the two literals previously concatenated without a
            # space ("...only be usedwithin an app env!")
            print(Colors.RED+'enaml-native uninstall should only be used '
                             'within an app env!'+Colors.RESET)
            raise SystemExit(0)
        #: Unlink first
        if hasattr(args, 'names'):
            # TODO...
            self.cmds['unlink'].run(args)
        shprint(self.cli.conda, 'uninstall', '-y', *args.args)
class Link(Command):
    """ The "Link" command tries to modify the android and ios projects
    to include all of the necessary changes for this package to work.

    A custom linker can be used by adding a "enaml_native_linker"
    entry_point which shall be a function that receives the app
    package.json (context) as an argument.

    Example
    ----------
    def linker(ctx):
        # Link android and ios projects here
        return True  #: To tell the cli the linking was handled
    """
    title = set_default("link")
    help = set_default("Link an enaml-native package "
                       "(updates android and ios projects)")
    args = set_default([
        ('names', dict(
            help="Package name (optional) If not set links all projects.",
            nargs='*')),
    ])
    #: Where "enaml native packages" are installed within the root
    package_dir = 'venv'

    def run(self, args=None):
        """ Link the named packages, or every package found under each
        target dir of `package_dir` when no names are given.
        """
        print("Linking {}".format(args.names if args and args.names
                                  else "all packages..."))
        if args and args.names:
            for name in args.names:
                self.link(self.package_dir, name)
        else:
            #: Link everything
            for target in ['android', 'iphoneos', 'iphonesimulator']:
                sysroot = join(self.package_dir, target)
                for path in os.listdir(sysroot):
                    self.link(sysroot, path)

    def link(self, path, pkg):
        """ Link the package in the current directory.

        Dispatches to the android and/or ios linker based on which
        project files (build.gradle / Podfile) the package ships.
        """
        # Check if a custom linker exists to handle linking this package
        #for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
        #    if ep.name.replace("-", '_') == pkg.replace("-", '_'):
        #        linker = ep.load()
        #        print("Custom linker {} found for '{}'. Linking...".format(
        #            linker, pkg))
        #        if linker(self.ctx, path):
        #            return
        #: Use the default builtin linker script
        if exists(join(path, pkg, 'build.gradle')):
            print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
                pkg)+Colors.RESET)
            self.link_android(path, pkg)
        if exists(join(path, pkg, 'Podfile')):
            print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
                pkg)+Colors.RESET)
            self.link_ios(path, pkg)

    @staticmethod
    def is_settings_linked(source, pkg):
        """ Returns true if the "include ':<project>'" line exists in the file
        """
        for line in source.split("\n"):
            if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
                return True
        return False

    @staticmethod
    def is_build_linked(source, pkg):
        """ Returns true if the "compile project(':<project>')"
        line exists exists in the file """
        for line in source.split("\n"):
            if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
                         line):
                return True
        return False

    @staticmethod
    def find_packages(path):
        """ Find all java files matching the "*Package.java" pattern within
        the given enaml package directory relative to the java source path.
        """
        matches = []
        root = join(path, 'src', 'main', 'java')
        for folder, dirnames, filenames in os.walk(root):
            for filename in fnmatch.filter(filenames, '*Package.java'):
                #: Open and make sure it's an EnamlPackage somewhere
                with open(join(folder, filename)) as f:
                    if "implements EnamlPackage" in f.read():
                        package = os.path.relpath(folder, root)
                        matches.append(os.path.join(package, filename))
        return matches

    @staticmethod
    def is_app_linked(source, pkg, java_package):
        """ Returns true if the compile project line exists exists in the file
        """
        for line in source.split("\n"):
            if java_package in line:
                return True
        return False

    def link_android(self, path, pkg):
        """ Link's the android project to this library.

        1. Includes this project's directory in the app's
           android/settings.gradle
           It adds:
               include ':<project-name>'
               project(':<project-name>').projectDir = new File(
                   rootProject.projectDir, '../packages/<project-name>/android')
        2. Add's this project as a dependency to the android/app/build.gradle
           It adds:
               compile project(':<project-name>')
           to the dependencies.
        3. If preset, adds the import and package statement
           to the android/app/src/main/java/<bundle/id>/MainApplication.java

        On any error all three files are restored to their original
        contents and the exception is re-raised.
        """
        bundle_id = self.ctx['bundle_id']
        pkg_root = join(path, pkg)
        #: Read the current contents so they can be restored on failure
        with open(join('android', 'settings.gradle')) as f:
            settings_gradle = f.read()
        with open(join('android', 'app', 'build.gradle')) as f:
            build_gradle = f.read()
        #: Find the MainApplication.java
        main_app_java_path = join('android', 'app', 'src', 'main', 'java',
                                  join(*bundle_id.split(".")),
                                  'MainApplication.java')
        with open(main_app_java_path) as f:
            main_application_java = f.read()
        try:
            #: Now link all the EnamlPackages we can find in the new "package"
            new_packages = Link.find_packages(join(path, pkg))
            if not new_packages:
                print("[Android] {} No EnamlPackages found to link!".format(
                    pkg))
                return
            #: Link settings.gradle
            if not Link.is_settings_linked(settings_gradle, pkg):
                #: Add two statements
                new_settings = settings_gradle.split("\n")
                new_settings.append("")  # Blank line
                new_settings.append("include ':{name}'".format(name=pkg))
                new_settings.append("project(':{name}').projectDir = "
                                    "new File(rootProject.projectDir, "
                                    "'../{path}/android/{name}')"
                                    .format(name=pkg, path=self.package_dir))
                with open(join('android', 'settings.gradle'), 'w') as f:
                    f.write("\n".join(new_settings))
                print("[Android] {} linked in settings.gradle!".format(pkg))
            else:
                print("[Android] {} was already linked in "
                      "settings.gradle!".format(pkg))
            #: Link app/build.gradle
            if not Link.is_build_linked(build_gradle, pkg):
                new_build = build_gradle.split("\n")
                #: Find correct line number
                found = False
                for i, line in enumerate(new_build):
                    if re.match(r"dependencies\s*{", line):
                        found = True
                        continue
                    if found and "}" in line:
                        #: Hackish way to find line of the closing bracket after
                        #: the dependencies { block is found
                        break
                if not found:
                    raise ValueError("Unable to find dependencies in "
                                     "{pkg}/app/build.gradle!".format(pkg=pkg))
                #: Insert before the closing bracket
                new_build.insert(i, " api project(':{name}')".format(
                    name=pkg))
                with open(join('android', 'app', 'build.gradle'), 'w') as f:
                    f.write("\n".join(new_build))
                print("[Android] {} linked in app/build.gradle!".format(pkg))
            else:
                print("[Android] {} was already linked in "
                      "app/build.gradle!".format(pkg))
            new_app_java = []
            for package in new_packages:
                #: Add our import statement
                javacls = os.path.splitext(package)[0].replace("/", ".")
                if not Link.is_app_linked(main_application_java, pkg, javacls):
                    #: Reuse previous if avialable
                    new_app_java = (new_app_java or
                                    main_application_java.split("\n"))
                    #: Find last import statement
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line, "import *;"):
                            j = i
                    new_app_java.insert(j+1, "import {};".format(javacls))
                    #: Add the package statement after the last existing
                    #: "new <Name>Package()" entry
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line.strip(), "new *Package()"):
                            j = i
                    if j == 0:
                        raise ValueError("Could not find the correct spot to "
                                         "add package {}".format(javacls))
                    else:
                        #: Add comma to previous line
                        new_app_java[j] = new_app_java[j] + ","
                        #: Insert new line
                        new_app_java.insert(j+1, " new {}()"
                                            .format(javacls.split(".")[-1]))
                else:
                    print("[Android] {} was already linked in {}!".format(
                        pkg, main_app_java_path))
            if new_app_java:
                with open(main_app_java_path, 'w') as f:
                    f.write("\n".join(new_app_java))
            print(Colors.GREEN+"[Android] {} linked successfully!".format(
                pkg)+Colors.RESET)
        except Exception as e:
            # NOTE(review): failure message uses GREEN -- presumably RED
            # was intended (compare Unlink) -- confirm before changing.
            print(Colors.GREEN+"[Android] {} Failed to link. "
                  "Reverting due to error: "
                  "{}".format(pkg, e)+Colors.RESET)
            #: Undo any changes
            with open(join('android', 'settings.gradle'), 'w') as f:
                f.write(settings_gradle)
            with open(join('android', 'app', 'build.gradle'), 'w') as f:
                f.write(build_gradle)
            with open(main_app_java_path, 'w') as f:
                f.write(main_application_java)
            #: Now blow up
            raise

    def link_ios(self, path, pkg):
        """ Placeholder -- iOS linking is not implemented yet. """
        print("[iOS] Link TODO:...")
class Unlink(Command):
    """ The "Unlink" command tries to undo the modifications done by the
    linker.

    A custom unlinker can be used by adding a "enaml_native_unlinker"
    entry_point which shall be a function that receives the app
    package.json (context) as an argument.

    Example
    ----------
    def unlinker(ctx):
        # Unlink android and ios projects here
        return True  #: To tell the cli the unlinking was handled
    """
    title = set_default("unlink")
    help = set_default("Unlink an enaml-native package")
    args = set_default([
        ('names', dict(help="Package name", nargs="+")),
    ])

    def run(self, args=None):
        """ The name IS required here. """
        print(Colors.BLUE+"[INFO] Unlinking {}...".format(
            args.names)+Colors.RESET)
        for name in args.names:
            self.unlink(Link.package_dir, name)

    def unlink(self, path, pkg):
        """ Unlink the package in the current directory.

        Tries a custom "enaml_native_unlinker" entry point first, then
        falls back to the builtin android/ios unlinkers based on which
        project files the package ships.
        """
        #: Check if a custom unlinker exists to handle unlinking this package
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_unlinker"):
            if ep.name.replace("-", '_') == pkg.replace("-", '_'):
                unlinker = ep.load()
                print("Custom unlinker {} found for '{}'. "
                      "Unlinking...".format(unlinker, pkg))
                if unlinker(self.ctx, path):
                    return
        if exists(join(path, 'android', pkg, 'build.gradle')):
            print("[Android] unlinking {}".format(pkg))
            self.unlink_android(path, pkg)
        for target in ['iphoneos', 'iphonesimulator']:
            if exists(join(path, target, pkg, 'Podfile')):
                # NOTE(review): unlink_ios is not defined on this class
                # in this file -- confirm it exists elsewhere or this
                # branch raises AttributeError.
                print("[iOS] unlinking {}".format(pkg))
                self.unlink_ios(path, pkg)

    def unlink_android(self, path, pkg):
        """ Unlink's the android project to this library.

        1. In the app's android/settings.gradle, it removes the following
           lines (if they exist):
               include ':<project-name>'
               project(':<project-name>').projectDir = new File(
                   rootProject.projectDir,
                   '../venv/packages/<project-name>/android')
        2. In the app's android/app/build.gradle, it removes the following
           line (if present)
               compile project(':<project-name>')
        3. In the app's
           android/app/src/main/java/<bundle/id>/MainApplication.java,
           it removes:
               import <package>.<Name>Package;
               new <Name>Package(),
           If no comma exists it will remove the comma from the previous
           line.

        On any error all three files are restored to their original
        contents and the exception is re-raised.
        """
        bundle_id = self.ctx['bundle_id']
        #: Read the current contents so they can be restored on failure
        with open(join('android', 'settings.gradle')) as f:
            settings_gradle = f.read()
        with open(join('android', 'app', 'build.gradle')) as f:
            build_gradle = f.read()
        #: Find the MainApplication.java
        main_app_java_path = join('android', 'app', 'src', 'main', 'java',
                                  join(*bundle_id.split(".")),
                                  'MainApplication.java')
        with open(main_app_java_path) as f:
            main_application_java = f.read()
        try:
            #: Now link all the EnamlPackages we can find in the new "package"
            new_packages = Link.find_packages(join(path, 'android', pkg))
            if not new_packages:
                print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
                                 "unlink!".format(pkg)+Colors.RESET)
                return
            #: Unlink settings.gradle
            if Link.is_settings_linked(settings_gradle, pkg):
                #: Remove the two statements
                new_settings = [
                    line for line in settings_gradle.split("\n")
                    if line.strip() not in [
                        "include ':{name}'".format(name=pkg),
                        "project(':{name}').projectDir = "
                        "new File(rootProject.projectDir, "
                        "'../{path}/android/{name}')".format(path=path,
                                                             name=pkg)
                    ]
                ]
                with open(join('android', 'settings.gradle'), 'w') as f:
                    f.write("\n".join(new_settings))
                print("\t[Android] {} unlinked settings.gradle!".format(pkg))
            else:
                print("\t[Android] {} was not linked in "
                      "settings.gradle!".format(pkg))
            #: Unlink app/build.gradle
            if Link.is_build_linked(build_gradle, pkg):
                #: Drop the dependency line (either api or compile form)
                new_build = [
                    line for line in build_gradle.split("\n")
                    if line.strip() not in [
                        "compile project(':{name}')".format(name=pkg),
                        "api project(':{name}')".format(name=pkg),
                    ]
                ]
                with open(join('android', 'app', 'build.gradle'), 'w') as f:
                    f.write("\n".join(new_build))
                print("\t[Android] {} unlinked in "
                      "app/build.gradle!".format(pkg))
            else:
                print("\t[Android] {} was not linked in "
                      "app/build.gradle!".format(pkg))
            new_app_java = []
            for package in new_packages:
                #: Remove our import statement
                javacls = os.path.splitext(package)[0].replace("/", ".")
                if Link.is_app_linked(main_application_java, pkg, javacls):
                    #: Reuse previous if avialable
                    new_app_java = (new_app_java or
                                    main_application_java.split("\n"))
                    new_app_java = [
                        line for line in new_app_java
                        if line.strip() not in [
                            "import {};".format(javacls),
                            "new {}()".format(javacls.split(".")[-1]),
                            "new {}(),".format(javacls.split(".")[-1]),
                        ]
                    ]
                    #: Now find the last package and remove the comma if it
                    #: exists
                    found = False
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line.strip(), "new *Package()"):
                            found = True
                        elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
                            j = i
                    #: We removed the last package so add a comma
                    if not found:
                        #: This kills any whitespace...
                        new_app_java[j] = new_app_java[j][
                            :new_app_java[j].rfind(',')]
                else:
                    print("\t[Android] {} was not linked in {}!".format(
                        pkg, main_app_java_path))
            if new_app_java:
                with open(main_app_java_path, 'w') as f:
                    f.write("\n".join(new_app_java))
            print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
                pkg)+Colors.RESET)
        except Exception as e:
            print(Colors.RED+"\t[Android] {} Failed to unlink. "
                  "Reverting due to error: {}".format(pkg, e)+Colors.RESET)
            #: Undo any changes
            with open(join('android', 'settings.gradle'), 'w') as f:
                f.write(settings_gradle)
            with open(join('android', 'app', 'build.gradle'), 'w') as f:
                f.write(build_gradle)
            with open(main_app_java_path, 'w') as f:
                f.write(main_application_java)
            #: Now blow up
            raise
class BuildAndroid(Command):
    """ Build the android project APK via the gradle wrapper. """
    title = set_default("build-android")
    help = set_default("Build android project")
    args = set_default([
        ('--release', dict(action='store_true', help="Release mode")),
        ('extra', dict(nargs=REMAINDER, help="Args to pass to gradle")),
    ])

    def run(self, args=None):
        """ Run the gradle assemble task for the android project.

        Builds a release APK when --release is given, otherwise a debug
        APK. Extra args are forwarded to gradle.
        """
        # Fixed: args.extra was previously dereferenced even when args
        # is None (the default), raising AttributeError
        extra = args.extra if args else []
        task = 'assembleRelease' if args and args.release else 'assembleDebug'
        with cd("android"):
            # The gradle wrapper script differs per platform
            gradlew = sh.Command('gradlew.bat' if IS_WIN else './gradlew')
            shprint(gradlew, task, *extra, _debug=True)
class CleanAndroid(Command):
    """ Run the gradle clean task on the android project. """
    title = set_default("clean-android")
    help = set_default("Clean the android project")

    def run(self, args=None):
        """ Invoke `gradlew clean` in the android dir. """
        with cd('android'):
            # The gradle wrapper script differs per platform
            gradlew = sh.Command('gradlew.bat' if IS_WIN else './gradlew')
            shprint(gradlew, 'clean', _debug=True)
class RunAndroid(Command):
    """ Build the android project with gradle, install the apk onto a
    connected device (or emulator) via adb, and launch the MainActivity.
    """
    title = set_default("run-android")
    help = set_default("Build android project, install it, and run")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
        ('extra', dict(nargs=REMAINDER, help="Extra args to pass to gradle")),
    ])

    def run(self, args=None):
        ctx = self.ctx
        #: App id from the project config, used to launch the activity below
        bundle_id = ctx['bundle_id']
        with cd("android"):
            #: Release builds produce an unsigned apk in the gradle outputs
            release_apk = os.path.abspath(join(
                '.', 'app', 'build', 'outputs', 'apk',
                'app-release-unsigned.apk'))
            gradlew = sh.Command('gradlew.bat' if IS_WIN else './gradlew')
            #: If no devices are connected, start the simulator
            #: NOTE(review): this compares the CHARACTER count of
            #: `adb devices` output to 1 — the header line alone is longer,
            #: so this branch looks unreachable; was splitlines() intended?
            if len(sh.adb('devices').stdout.strip()) == 1:
                #: Launch the first available avd (blocks in the foreground)
                device = sh.emulator('-list-avds').stdout.split("\n")[0]
                shprint(sh.emulator, '-avd', device)
            if args and args.release:
                shprint(gradlew, 'assembleRelease', *args.extra, _debug=True)
                #shprint(sh.adb,'uninstall','-k','"{}"'.format(bundle_id))
                shprint(sh.adb, 'install', release_apk)
            else:
                #: installDebug both builds and installs in one gradle task
                shprint(gradlew, 'installDebug', *args.extra, _debug=True)
            #: Start the app's main activity on the device
            shprint(sh.adb, 'shell', 'am', 'start', '-n',
                    '{bundle_id}/{bundle_id}.MainActivity'.format(
                        bundle_id=bundle_id))
class CleanIOS(Command):
    """ Run `xcodebuild clean` against the iOS App project. """
    title = set_default("clean-ios")
    help = set_default("Clean the ios project")

    def run(self, args=None):
        #: Clean all targets of the ReleaseAdhoc configuration
        flags = ('-project', 'App.xcodeproj',
                 '-configuration', 'ReleaseAdhoc', '-alltargets')
        with cd('ios'):
            shprint(sh.xcodebuild, 'clean', *flags)
class RunIOS(Command):
    """ Build the iOS app with xcodebuild and launch it on the currently
    booted simulator.
    """
    title = set_default("run-ios")
    help = set_default("Build and run the ios project")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
    ])

    def run(self, args=None):
        ctx = self.ctx
        env = ctx['ios']  #: NOTE(review): only used by the commented-out code
        with cd('ios'):
            #: The .xcworkspace is generated by CocoaPods
            ws = glob("*.xcworkspace")
            if not ws:
                raise RuntimeError("Couldn't find a xcworkspace in the ios folder! "
                                   "Did you run `pod install`? ")
            workspace = ws[0]
            #: Scheme name is the workspace filename without the extension
            scheme = '.'.join(workspace.split('.')[0:-1])
            shprint(sh.xcrun, 'xcodebuild',
                    '-scheme', scheme,
                    '-workspace', workspace,
                    '-configuration', 'Release' if args and args.release else 'Debug',
                    '-allowProvisioningUpdates',
                    '-derivedDataPath', 'run')
            #shprint(sh.xcrun, 'simctl', 'install', 'booted',
            #        'build/Build/Products/Debug-iphonesimulator/
            #        {project}.app'.format(**env))
            #: Launch on whatever simulator is currently booted
            shprint(sh.xcrun, 'simctl', 'launch', 'booted', ctx['bundle_id'])
class BuildIOS(Command):
    """ Build the iOS app via xcodebuild using the CocoaPods generated
    workspace.
    """
    title = set_default("build-ios")
    help = set_default("Build the ios project")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
    ])

    def run(self, args=None):
        """ Build the scheme derived from the workspace name.

        Raises
        ------
        RuntimeError
            If no .xcworkspace exists (i.e. `pod install` was never run).
        """
        #: Removed unused local `ctx = self.ctx`
        with cd('ios'):
            #: The workspace is generated by `pod install`
            ws = glob("*.xcworkspace")
            if not ws:
                raise RuntimeError("Couldn't find a xcworkspace in the ios folder! "
                                   "Did you run `pod install`? ")
            workspace = ws[0]
            #: Scheme name is the workspace filename without the extension
            scheme = '.'.join(workspace.split('.')[0:-1])
            config = 'Release' if args and args.release else 'Debug'
            shprint(sh.xcrun,
                    'xcodebuild',
                    '-scheme', scheme,
                    '-workspace', workspace,
                    '-configuration', config,
                    '-allowProvisioningUpdates',
                    '-derivedDataPath', 'build')
class Server(Command):
    """ Run a dev server to host files. Only view files can be reloaded at the
    moment.

    Watches the app's `src` folder (via watchdog) and pushes changed
    .py/.enaml files to a connected app over a websocket. In
    --remote-debugging mode it instead acts as a dumb bridge that forwards
    messages between two connected clients (e.g. app and debugger).
    """
    title = set_default("start")
    help = set_default("Start a debug server for serving files to the app")

    #: Dev server index page to render
    index_page = Unicode("enaml-native dev server. "
                         "When you change a source file it pushes to the app.")

    args = set_default([
        ('--remote-debugging', dict(action='store_true',
                                    help="Run in remote debugging mode")),
    ])

    #: Server port
    port = Int(8888)

    #: Time in ms to wait before triggering a reload
    reload_delay = Float(1)

    _reload_count = Int()  #: Pending reload requests

    #: Watchdog observer
    observer = Instance(object)

    #: Watchdog handler
    watcher = Instance(object)

    #: Websocket handler implementation
    handlers = List()

    #: Callable to add a callback from a thread into the event loop
    add_callback = Callable()

    #: Callable to add a callback at some later time
    call_later = Callable()

    #: Changed file events
    changes = List()

    #: Run in bridge (forwarding) mode for remote debugging
    remote_debugging = Bool()

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args=None):
        """ Start the file watcher (unless remote debugging) and then the
        tornado or twisted event loop, whichever the app depends on.
        """
        ctx = self.ctx
        #: Look for tornado or twisted in reqs
        use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))

        #: Save setting
        self.remote_debugging = args and args.remote_debugging
        if self.remote_debugging:
            #: Do reverse forwarding so you can use remote-debugging over
            #: adb (via USB even if Wifi is not accessible)
            shprint(sh.adb, 'reverse',
                    'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
        else:
            #: Setup observer
            try:
                from watchdog.observers import Observer
                from watchdog.events import LoggingEventHandler
            except ImportError:
                print(Colors.RED + "[WARNING] Watchdog is required the dev "
                      "server: Run 'pip install watchdog'" + Colors.RESET)
                return
            self.observer = Observer()
            server = self

            class AppNotifier(LoggingEventHandler):
                #: Forwards filesystem events into the server's event loop
                def on_any_event(self, event):
                    super(AppNotifier, self).on_any_event(event)
                    #: Use add callback to push to event loop thread
                    server.add_callback(server.on_file_changed, event)

        with cd('src'):
            if not self.remote_debugging:
                print("Watching {}".format(abspath('.')))
                self.watcher = AppNotifier()
                self.observer.schedule(self.watcher, abspath('.'),
                                       recursive=True)
                self.observer.start()
            if use_twisted:
                self.run_twisted(args)
            else:
                self.run_tornado(args)

    def run_tornado(self, args):
        """ Tornado dev server implementation """
        server = self
        import tornado.ioloop
        import tornado.web
        import tornado.websocket
        ioloop = tornado.ioloop.IOLoop.current()

        class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
            #: Delegates websocket lifecycle events to the Server instance
            def open(self):
                super(DevWebSocketHandler, self).open()
                server.on_open(self)

            def on_message(self, message):
                server.on_message(self, message)

            def on_close(self):
                super(DevWebSocketHandler, self).on_close()
                server.on_close(self)

        class MainHandler(tornado.web.RequestHandler):
            #: Serves the plain-text index page
            def get(self):
                self.write(server.index_page)

        #: Set the call later method
        server.call_later = ioloop.call_later
        server.add_callback = ioloop.add_callback

        app = tornado.web.Application([
            (r"/", MainHandler),
            (r"/dev", DevWebSocketHandler),
        ])
        app.listen(self.port)
        print("Tornado Dev server started on {}".format(self.port))
        ioloop.start()

    def run_twisted(self, args):
        """ Twisted dev server implementation """
        server = self
        from twisted.internet import reactor
        from twisted.web import resource
        from twisted.web.static import File
        from twisted.web.server import Site
        from autobahn.twisted.websocket import (WebSocketServerFactory,
                                                WebSocketServerProtocol)
        from autobahn.twisted.resource import WebSocketResource

        class DevWebSocketHandler(WebSocketServerProtocol):
            #: Delegates websocket lifecycle events to the Server instance
            def onConnect(self, request):
                super(DevWebSocketHandler, self).onConnect(request)
                server.on_open(self)

            def onMessage(self, payload, isBinary):
                server.on_message(self, payload)

            def onClose(self, wasClean, code, reason):
                super(DevWebSocketHandler, self).onClose(wasClean, code,
                                                         reason)
                server.on_close(self)

            #: Adapter so Server can use the same API as tornado handlers
            def write_message(self, message, binary=False):
                self.sendMessage(message, binary)

        #: Set the call later method
        server.call_later = reactor.callLater
        server.add_callback = reactor.callFromThread

        factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
        factory.protocol = DevWebSocketHandler

        class MainHandler(resource.Resource):
            #: Serves the plain-text index page
            def render_GET(self, req):
                return str(server.index_page)

        root = resource.Resource()
        root.putChild("", MainHandler())
        root.putChild("dev", WebSocketResource(factory))

        reactor.listenTCP(self.port, Site(root))
        print("Twisted Dev server started on {}".format(self.port))
        reactor.run()

    #: ========================================================
    #: Shared protocol implementation
    #: ========================================================
    def on_open(self, handler):
        """ Track a newly connected client and reset pending reloads. """
        self._reload_count = 0
        print("Client {} connected!".format(handler))
        self.handlers.append(handler)

    def on_message(self, handler, msg):
        """ In remote debugging mode this simply acts as a forwarding
        proxy for the two clients.
        """
        if self.remote_debugging:
            #: Forward to other clients
            for h in self.handlers:
                if h != handler:
                    h.write_message(msg, True)
        else:
            print(msg)

    def send_message(self, msg):
        """ Send a message to the client. This should not be used in
        remote debugging mode.
        """
        if not self.handlers:
            return  #: Client not connected
        for h in self.handlers:
            h.write_message(msg)

    def on_close(self, handler):
        """ Stop tracking a disconnected client. """
        print("Client {} left!".format(handler))
        self.handlers.remove(handler)

    def on_file_changed(self, event):
        """ Queue the change and schedule a debounced reload; the reload
        only fires once the last scheduled callback runs (count hits 0).
        """
        print(event)
        self._reload_count += 1
        self.changes.append(event)
        self.call_later(self.reload_delay, self._trigger_reload, event)

    def _trigger_reload(self, event):
        """ Collect all changed .py/.enaml files and push a single 'reload'
        message with their contents to connected clients.
        """
        self._reload_count -= 1
        if self._reload_count == 0:
            files = {}
            for event in self.changes:
                path = os.path.relpath(event.src_path, os.getcwd())
                #: Only python / enaml sources can be hot reloaded
                if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
                    continue
                with open(event.src_path) as f:
                    data = f.read()
                #: Add to changed files
                files[path] = data
            if files:
                #: Send the reload request
                msg = {
                    'type': 'reload',
                    'files': files
                }
                print("Reloading: {}".format(files.keys()))
                self.send_message(json.dumps(msg))
            #: Clear changes
            self.changes = []
def find_commands(cls):
    """ Collect every direct and indirect subclass of ``cls`` in
    depth-first (preorder) definition order.
    """
    found = []
    #: Explicit stack instead of recursion; reversing keeps preorder
    stack = list(reversed(cls.__subclasses__()))
    while stack:
        sub = stack.pop()
        found.append(sub)
        stack.extend(reversed(sub.__subclasses__()))
    return found
class EnamlNativeCli(Atom):
    """ The root CLI object: discovers commands, builds the argument
    parser, loads the app context from environment.yml, and dispatches
    the selected command.
    """
    #: Root parser
    parser = Instance(ArgumentParser)

    #: Loaded from package
    ctx = Dict()

    #: Parsed args
    args = Instance(Namespace)

    #: Location of package file
    package = Unicode("environment.yml")

    #: If enaml-native is being run within an app directory
    in_app_directory = Bool()

    #: Conda command
    conda = Instance(sh.Command)

    #: Commands
    commands = List(Command)

    def _default_commands(self):
        """ Build the list of CLI commands by finding subclasses of the
        Command class.

        Also allows commands to be installed using the
        "enaml_native_command" entry point. This entry point should return
        a Command subclass.
        """
        commands = [c() for c in find_commands(Command)]

        #: Get commands installed via entry points
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_command"):
            c = ep.load()
            if not issubclass(c, Command):
                print("Warning: entry point {} did not return a valid enaml "
                      "cli command! This command will be ignored!".format(
                          ep.name))
                #: BUGFIX: previously the invalid command was appended
                #: anyway despite the warning saying it would be ignored
                continue
            commands.append(c())
        return commands

    def _default_in_app_directory(self):
        """ Return if we are in a directory that contains the package.json
        file which should indicate it's in the root directory of an
        enaml-native app.
        """
        return exists(self.package)

    def _default_ctx(self):
        """ Return the package config or context and normalize some of the
        values.
        """
        if not self.in_app_directory:
            print("Warning: {} does not exist. Using the default.".format(
                self.package))
            ctx = {}
        else:
            with open(self.package) as f:
                #: RoundTripLoader preserves comments/order when re-saved
                ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
        if self.in_app_directory:
            # Update the env for each platform
            excluded = list(ctx.get('excluded', []))
            for env in [ctx['ios'], ctx['android']]:
                if 'python_build_dir' not in env:
                    env['python_build_dir'] = expanduser(
                        abspath('build/python'))
                if 'conda_prefix' not in env:
                    env['conda_prefix'] = os.environ.get(
                        'CONDA_PREFIX', expanduser(abspath('venv')))
                # Join the shared and local exclusions
                env['excluded'] = list(env.get('excluded', [])) + excluded
        return ctx

    def _default_parser(self):
        """ Generate a parser using the command list """
        parser = ArgumentParser(prog='enaml-native')

        #: Build commands by name
        cmds = {c.title: c for c in self.commands}

        #: Build parser, prepare commands
        subparsers = parser.add_subparsers()
        for c in self.commands:
            p = subparsers.add_parser(c.title, help=c.help)
            c.parser = p
            for (flags, kwargs) in c.args:
                p.add_argument(*flags.split(), **kwargs)
            p.set_defaults(cmd=c)
            c.ctx = self.ctx
            c.cmds = cmds
            c.cli = self
        return parser

    def _default_conda(self):
        return find_conda()

    def check_dependencies(self):
        """ Ensure conda is actually runnable before dispatching.

        Raises
        ------
        EnvironmentError
            If conda cannot be invoked.
        """
        try:
            self.conda('--version')
        except Exception:
            #: BUGFIX: was a bare `except:` which also swallowed
            #: KeyboardInterrupt/SystemExit
            raise EnvironmentError(
                "conda could not be found. Please install miniconda from "
                "https://conda.io/miniconda.html or set CONDA_HOME to the"
                "location where conda is installed.")

    def start(self):
        """ Run the commands"""
        self.check_dependencies()
        self.args = self.parser.parse_args()

        # Python 3 doesn't set the cmd if no args are given
        if not hasattr(self.args, 'cmd'):
            self.parser.print_help()
            return

        cmd = self.args.cmd
        try:
            if cmd.app_dir_required and not self.in_app_directory:
                raise EnvironmentError(
                    "'enaml-native {}' must be run within an app root "
                    "directory not: {}".format(cmd.title, os.getcwd()))
            cmd.run(self.args)
        except sh.ErrorReturnCode:
            #: Shell failures already printed their output; just propagate
            raise
def main():
    """ CLI entry point: construct the cli object and dispatch. """
    cli = EnamlNativeCli()
    cli.start()


if __name__ == '__main__':
    main()
|
codelv/enaml-native-cli | enamlnativecli/main.py | find_commands | python | def find_commands(cls):
cmds = []
for subclass in cls.__subclasses__():
cmds.append(subclass)
cmds.extend(find_commands(subclass))
return cmds | Finds commands by finding the subclasses of Command | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1617-L1623 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the GPLv3 License.
The full license is in the file COPYING.txt, distributed with this software.
Created on July 10, 2017
@author: jrm
"""
import os
import re
import sys
import json
import shutil
import tarfile
import fnmatch
import compileall
import pkg_resources
from glob import glob
from os.path import join, exists, abspath, expanduser, realpath, dirname
from argparse import ArgumentParser, Namespace, REMAINDER
from atom.api import (Atom, Bool, Callable, Dict, List, Unicode, Float, Int,
Instance, set_default)
from contextlib import contextmanager
from cookiecutter.main import cookiecutter
from cookiecutter.log import configure_logger
from distutils.dir_util import copy_tree
try:
# Try conda's version
import ruamel_yaml as yaml
except:
from ruamel import yaml
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
#: True on Windows only; 'darwin' also contains "win" so it is excluded
IS_WIN = 'win' in sys.platform and not 'darwin' == sys.platform

# sh does not work on windows
if IS_WIN:
    import pbs

    class Sh(object):
        """ Minimal shim exposing the `sh` module API on top of `pbs`. """
        def __getattr__(self, attr):
            if hasattr(pbs, attr):
                return getattr(pbs, attr)
            #: Fall back to treating the name as an executable
            return pbs.Command(attr)

    sh = Sh()

    #: Locate the Android SDK tools installed by Android Studio
    ANDROID_SDK = join(os.environ.get('LOCALAPPDATA', ''), 'Android', 'Sdk')
    adb = join(ANDROID_SDK, 'platform-tools', 'adb.exe')
    emulator = join(ANDROID_SDK, 'emulator', 'emulator.exe')
    #: NOTE: these checks run at import time and abort the whole cli if
    #: the SDK tools are missing
    if exists(adb):
        sh.adb = sh.Command(adb)
    else:
        raise EnvironmentError("Couldn't find a adb in your System, "
                               "Make sure android studio is installed")
    if exists(emulator):
        sh.emulator = sh.Command(emulator)
    else:
        raise EnvironmentError("Couldn't find a emulator in your System, "
                               "Make sure android studio is installed")
else:
    import sh
def find_conda():
    """ Try to find the conda executable on the system.

    Searches common miniconda install locations, the CONDA_HOME
    override, and every directory on PATH. Falls back to letting `sh`
    resolve `conda` if nothing is found.

    Returns
    -------
    result: sh.Command
        A command wrapping the conda executable.
    """
    USER_HOME = os.path.expanduser('~')
    CONDA_HOME = os.environ.get('CONDA_HOME', '')
    PROGRAMDATA = os.environ.get('PROGRAMDATA', '')

    # Search common install paths and the PATH
    # BUGFIX: PATH was previously split on `';' if 'win' in sys.path
    # else ':'` — that tested sys.path (the module search path) instead
    # of sys.platform, so PATH was split on ':' even on Windows.
    # os.pathsep is the correct separator on every platform.
    search_paths = [
        # Windows
        join(PROGRAMDATA, 'miniconda2', 'scripts'),
        join(PROGRAMDATA, 'miniconda3', 'scripts'),
        join(USER_HOME, 'miniconda2', 'scripts'),
        join(USER_HOME, 'miniconda3', 'scripts'),
        join(CONDA_HOME, 'scripts'),

        # Linux
        join(USER_HOME, 'miniconda2', 'bin'),
        join(USER_HOME, 'miniconda3', 'bin'),
        join(CONDA_HOME, 'bin'),

        # TODO: OSX
    ] + os.environ.get("PATH", "").split(os.pathsep)
    cmd = 'conda.exe' if IS_WIN else 'conda'
    for conda_path in search_paths:
        conda = join(conda_path, cmd)
        if exists(conda):
            return sh.Command(conda)

    # Try to let the system find it
    return sh.conda
class Colors:
    """ ANSI terminal escape sequences used to colorize cli output. """
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    GREEN = "\033[0;32m"
    RESET = "\033[0;0m"   #: Restores the default style
    BOLD = "\033[;1m"
    REVERSE = "\033[;7m"
@contextmanager
def cd(newdir):
    """ Context manager that chdirs into ``newdir`` (with ~ expansion)
    and always restores the previous working directory on exit.
    """
    olddir = os.getcwd()
    print("[DEBUG]: -> running cd {}".format(newdir))
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        #: Restore even if the body raised
        print("[DEBUG]: -> running cd {}".format(olddir))
        os.chdir(olddir)
def cp(src, dst):
    """ Recursively copy ``src`` to ``dst`` (like ``cp -R src dst``).

    Files are copied with shutil.copy, creating the destination's parent
    directory first if needed. Directories are merged into ``dst``.
    """
    print("[DEBUG]: -> copying {} to {}".format(src, dst))
    if os.path.isfile(src):
        #: BUGFIX: guard against dirname(dst) == '' (a bare filename)
        #: which previously made os.makedirs('') raise
        d = dirname(dst)
        if d and not exists(d):
            os.makedirs(d)
        shutil.copy(src, dst)
    else:
        try:
            #: distutils was removed in Python 3.12; dirs_exist_ok (3.8+)
            #: preserves copy_tree's merge-into-existing-dir behavior
            shutil.copytree(src, dst, dirs_exist_ok=True)
        except TypeError:
            #: Older Python without dirs_exist_ok: fall back to distutils
            copy_tree(src, dst)
def shprint(cmd, *args, **kwargs):
    """ Run an `sh` command, streaming its combined stdout/stderr to the
    terminal character by character.

    With `_debug=True` (the default) output is echoed verbatim; otherwise
    each line is rewritten in place as a padded one-line status, colored
    red when it contains 'error'. On Windows the command is run in the
    background via pbs and its stdout is pumped manually.
    """
    debug = kwargs.pop('_debug', True)
    write, flush = sys.stdout.write, sys.stdout.flush
    #: Merge stderr into stdout and iterate unbuffered
    kwargs.update({
        '_err_to_out': True,
        '_out_bufsize': 0,
        '_iter': True
    })
    #: RunningCommand args (piped input) are skipped in the banner
    print("{}[INFO]: -> running {} {}{}".format(
        Colors.CYAN, cmd, " ".join([a for a in args if
                                    not isinstance(a, sh.RunningCommand)
                                    ]), Colors.RESET))
    if IS_WIN:
        #: pbs doesn't support iteration; run in background and pump
        kwargs.pop('_out_bufsize')
        kwargs.pop('_iter')
        kwargs['_bg'] = True
        process = cmd(*args, **kwargs).process
        for c in iter(lambda: process.stdout.read(1), ''):
            write(c.decode('utf-8'))
            if c in ['\r', '\n']:
                flush()
            if not c:
                break
        process.wait()
        return
    buf = []
    for c in cmd(*args, **kwargs):
        if debug:
            write(c)
            if c in ['\r', '\n']:
                flush()
        else:
            if c in ['\r', '\n']:
                #: End of line: render it as an in-place status line
                msg = ''.join(buf)
                color = Colors.RED if 'error' in msg else Colors.RESET
                write('{}\r[DEBUG]: {:<{w}}{}'.format(
                    color, msg, Colors.RESET, w=100))
                flush()
                buf = []
            else:
                buf.append(c)
    write("\n")
    flush()
#: Maps an Android ABI name to the arch folder name used in the conda env
ANDROID_ABIS = {
    'x86_64': 'x86_64',
    'x86': 'x86',
    'armeabi-v7a': 'arm',
    'arm64-v8a': 'arm64',
}
#: Reverse mapping: conda env arch folder name -> Android ABI name
ANDROID_TARGETS = {v: k for k, v in ANDROID_ABIS.items()}
class Command(Atom):
    """ Base class for all enaml-native cli subcommands. Subclasses are
    discovered automatically (see find_commands) and wired into the root
    argument parser by EnamlNativeCli.
    """
    #: Shared instance slot (set externally); see instance()
    _instance = None

    #: Subcommand name ex enaml-native <name>
    title = Unicode()

    #: Subcommand short description
    desc = Unicode()

    #: Subcommand help text
    help = Unicode()

    #: Package context used to retrieve app config and env
    ctx = Dict()

    #: Reference to other CLI commands
    cmds = Dict()

    #: Arguments this command accepts
    args = List(tuple)

    #: Parser this command uses. Generated automatically.
    parser = Instance(ArgumentParser)

    #: If the command requires running in an app dir
    app_dir_required = Bool(True)

    #: Reference to the cli
    cli = Instance(Atom)

    @classmethod
    def instance(cls):
        #: Return the shared instance, if one was assigned
        return cls._instance

    def run(self, args):
        #: Subclasses override this to implement the command
        pass
class Create(Command):
    """ Generate a new enaml-native app, lib, or package from the bundled
    cookiecutter templates.
    """
    title = set_default('create')
    help = set_default("Create an enaml-native project")
    args = set_default([
        ('what', dict(help='What to create (app, lib, package)?')),
        ('--no-input', dict(action='store_true',
                            help="Use all defaults")),
        #: BUGFIX: help text previously rendered as "Overwrite the contents
        #: ifit already exists" (implicit concat was missing a space)
        ('-f --overwrite-if-exists', dict(action='store_true',
                                          help="Overwrite the contents if "
                                               "it already exists")),
        ('-v --verbose', dict(action='store_true', help="Verbose logging")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        """ Render the template for args.what into the current directory.
        Propagates cookiecutter's errors if the template is invalid or
        the output exists (without -f).
        """
        #: Templates ship alongside this module
        template = join(dirname(__file__), 'templates', args.what)
        configure_logger(
            stream_level='DEBUG' if args.verbose else 'INFO',
            debug_file=None,
        )
        cookiecutter(template,
                     no_input=args.no_input,
                     overwrite_if_exists=args.overwrite_if_exists)
        print(Colors.GREEN+"[INFO] {} created successfully!".format(
            args.what.title())+Colors.RESET)
class BuildRecipe(Command):
    """ Thin alias to `conda build` for a recipe directory. """
    title = set_default('build-recipe')
    help = set_default("Alias to conda build")
    args = set_default([
        ('package', dict(help='Conda recipe to build')),
        ('args', dict(nargs=REMAINDER, help="args to pass to conda build")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        #: pip- recipes are pure python: force any compile step to fail
        build_env = dict(os.environ)
        if args.package.startswith('pip-'):
            build_env['CC'] = '/bin/false'
            build_env['CXX'] = '/bin/false'
        shprint(self.cli.conda, 'build', args.package, *args.args,
                _env=build_env)
        print(Colors.GREEN+"[INFO] Built {} successfully!".format(
            args.package)+Colors.RESET)
class MakePipRecipe(Command):
    """ Generates (via `conda skeleton pypi`) and then builds a noarch
    conda recipe, prefixed with `pip-`, for a given pip package so it can
    be installed into Android and iOS app environments.
    """
    title = set_default('make-pip-recipe')
    help = set_default("Creates a universal Android and iOS recipe "
                       "for a given pip package")
    args = set_default([
        ('package', dict(help='pip package to build a recipe for')),
        ('--recursive', dict(action='store_true',
                             help="recursively create for all dependencies")),
        ('--force', dict(action='store_true',
                         help="force recreation if it already exists")),
        ('--croot', dict(nargs="?", help="conda root for building recipes")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    #: Recipes built
    _built = List()

    def run(self, args):
        self.build(args.package, args)
        print(Colors.GREEN+"[INFO] Made successfully!"+Colors.RESET)

    def build(self, package, args):
        """ Generate the recipe with conda skeleton, rewrite its meta.yaml
        for cross-compiled app use (pip- prefix, noarch, per-arch install
        script, prefixed requirements), then build it with conda build.
        May recurse into dependencies when --recursive is given.
        """
        ctx = self.ctx
        #: Snapshot the dir listing so new recipe folders can be detected
        old = set(os.listdir('.'))

        # Run conda skeleton
        shprint(self.cli.conda, 'skeleton', 'pypi', package)
        new = set(os.listdir('.')).difference(old)
        self._built.append(package)
        for recipe in new:
            dst = 'pip-{}'.format(recipe)

            # Rename to add pip-prefix so it doesn't
            # conflict with regular recipes
            if args.force and exists(dst):
                shutil.rmtree(dst)
            shutil.move(recipe, dst)
            #template = join(dirname(__file__), 'templates', 'recipe')
            #cookiecutter(template, no_input=True,
            #             extra_context={'name': package, 'recipe': dst})

            # Copy the recipe
            #shutil.copy(join(recipe, 'meta.yaml'), join(dst, 'meta.yaml'))
            #shutil.rmtree(recipe)

            # Read the generated recipe
            with open(join(dst, 'meta.yaml')) as f:
                # Strip off the jinja tags (and add them in at the end)
                data = f.read().split("\n")
                var_lines = len([l for l in data if l.startswith("{%")])

                # Skip version, name, etc..
                meta = yaml.load("\n".join(data[var_lines:]),
                                 Loader=yaml.RoundTripLoader)

            # Update name
            meta['package']['name'] = 'pip-'+meta['package']['name']

            # Remove description it can cause issues
            summary = meta['about'].get('summary', '')
            summary += " Built for Android and iOS apps using enaml-native."
            meta['about']['summary'] = summary

            # Update the script to install for every arch
            script = meta['build'].pop('script', '')
            meta['build']['noarch'] = True
            #: CC/CXX are forced to fail so impure (compiled) packages error
            build_script = ['export CC=/bin/false', 'export CXX=/bin/false']
            build_script += [
                '{script} --no-compile '
                '--install-base=$PREFIX/{prefix} '
                '--install-lib=$PREFIX/{prefix}/python/site-packages '
                '--install-scripts=$PREFIX/{prefix}/scripts '
                '--install-data=$PREFIX/{prefix}/data '
                '--install-headers=$PREFIX/{prefix}/include'.format(
                    script=script.strip(), prefix=p, **ctx) for p in [
                    'android/arm', 'android/arm64', 'android/x86',
                    'android/x86_64', 'iphoneos', 'iphonesimulator'
                ]
            ]
            meta['build']['script'] = build_script

            # Prefix all dependencies with 'pip-'
            requires = []
            excluded = ['python', 'cython', 'setuptools']
            for stage in meta['requirements'].keys():
                reqs = meta['requirements'].pop(stage, [])
                requires.extend(reqs)
                r = ['pip-{}'.format(r) for r in reqs if r not in excluded]
                if r:
                    meta['requirements'][stage] = r

            # Build all requirements
            if args.recursive:
                requires = list(set(requires))
                for pkg in requires:
                    # Strip off any version
                    pkg = re.split("[<>=]", pkg)[0].strip()
                    if pkg in excluded or pkg in self._built:
                        continue  # Not needed or already done
                    if args.force or not exists('pip-{}'.format(pkg)):
                        self.build(pkg, args)

            # Remove tests we're cross compiling
            meta.pop('test', None)

            # Save it
            with open(join(dst, 'meta.yaml'), 'w') as f:
                f.write("\n".join(data[:var_lines])+"\n")
                f.write(yaml.dump(meta, Dumper=yaml.RoundTripDumper,
                                  width=1000))

            # Now build it
            build_args = ['--croot={}'.format(args.croot)
                          ] if args.croot else []

            # Want to force a failure on any compiling
            #: NOTE(review): env is constructed here but never passed to
            #: conda build (no _env=env) — confirm whether that was intended
            env = os.environ.copy()
            env.update({'CC': '/bin/false', 'CXX': '/bin/false'})
            shprint(self.cli.conda, 'build', dst, *build_args)
            print(Colors.GREEN+"[INFO] Built {} successfully!".format(
                dst)+Colors.RESET)
class NdkStack(Command):
    """ Shortcut to run ndk-stack to show debugging output of a crash in a
    native library.
    See https://developer.android.com/ndk/guides/ndk-stack.html
    """
    title = set_default("ndk-stack")
    help = set_default("Run ndk-stack on the adb output")
    args = set_default([
        ('arch', dict(nargs='?', default="armeabi-v7a")),
        ('args', dict(nargs=REMAINDER, help="Extra args for ndk-stack")),
    ])

    def run(self, args=None):
        env = self.ctx['android']
        #: ndk-stack ships with the NDK referenced by the app env
        tool = 'ndk-stack.cmd' if IS_WIN else 'ndk-stack'
        ndk_stack = sh.Command(join(os.path.expanduser(env['ndk']), tool))
        arch = args.arch if args else 'armeabi-v7a'
        #: Symbol dir for the requested ABI
        sym = 'venv/android/enaml-native/src/main/obj/local/{}'.format(arch)
        shprint(ndk_stack, sh.adb('logcat', _piped=True), '-sym', sym)
class NdkBuild(Command):
    """ Run ndk-build on enaml-native and any packages
    that define an `enaml_native_ndk_build` entry_point.
    """
    title = set_default("ndk-build")
    help = set_default("Run ndk-build on the android project")

    def run(self, args=None):
        ctx = self.ctx
        env = ctx['android']

        # Lib version
        #: Detect the installed android-python's version from conda list
        build_ver = sys.version_info.major
        for line in self.cli.conda('list').split("\n"):
            print(line)
            if 'android-python' in line:
                build_ver = 2 if 'py27' in line else 3
                py_version = ".".join(line.split()[1].split(".")[:2])
                if build_ver > 2:
                    #: py3 libs carry the 'm' ABI suffix (e.g. 3.6m)
                    py_version += 'm'
                break
        #: NOTE(review): py_version is unbound if no android-python line
        #: is found — this would raise NameError here; confirm assumption
        #: that android-python is always installed
        print(Colors.GREEN+"[DEBUG] Building for {}".format(
            py_version)+Colors.RESET)

        ndk_build = sh.Command(join(
            os.path.expanduser(env['ndk']),
            'ndk-build.cmd' if IS_WIN else 'ndk-build'
        ))
        arches = [ANDROID_TARGETS[arch] for arch in env['targets']]

        #: Where the jni files are
        jni_dir = env.get(
            'jni_dir',
            "{conda_prefix}/android/enaml-native/src/main/jni".format(**env)
        )
        if 'jni_dir' not in env:
            env['jni_dir'] = jni_dir

        #: Where native libraries go for each arch
        ndk_build_dir = env.get(
            'ndk_build_dir',
            "{conda_prefix}/android/enaml-native/src/main/libs".format(**env)
        )
        if 'ndk_build_dir' not in env:
            env['ndk_build_dir'] = ndk_build_dir

        #: Do ndk-build in the jni dir
        with cd(jni_dir):
            #: Patch Application.mk to have the correct ABI's
            with open('Application.mk') as f:
                app_mk = f.read()

            #: APP_ABI := armeabi-v7a
            new_mk = []
            for line in app_mk.split("\n"):
                if re.match(r'APP_ABI\s*:=\s*.+', line):
                    line = 'APP_ABI := {}'.format(" ".join(arches))
                new_mk.append(line)
            with open('Application.mk', 'w') as f:
                f.write("\n".join(new_mk))

            #: Patch Android.mk to have the correct python version
            with open('Android.mk') as f:
                android_mk = f.read()

            #: PY_LIB_VER := 2.7
            new_mk = []
            for line in android_mk.split("\n"):
                if re.match(r'PY_LIB_VER\s*:=\s*.+', line):
                    line = 'PY_LIB_VER := {}'.format(py_version)
                new_mk.append(line)
            with open('Android.mk', 'w') as f:
                f.write("\n".join(new_mk))

            #: Now run nkd-build
            shprint(ndk_build)

        #: Add entry point so packages can include their own jni libs
        dependencies = ctx['dependencies']  #.keys()
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_ndk_build"):
            for name in dependencies:
                if ep.name.replace("-", '_') == name.replace("-", '_'):
                    ndk_build_hook = ep.load()
                    print("Custom ndk_build_hook {} found for '{}'. ".format(
                        ndk_build_hook, name))
                    ndk_build_hook(self.ctx)
                    break

        #: Now copy all compiled python modules to the jniLibs dir so android
        #: includes them
        for arch in arches:
            cfg = dict(
                arch=arch,
                local_arch=ANDROID_ABIS[arch],
                ndk_build_dir=ndk_build_dir,
            )
            cfg.update(env)  # get python_build_dir from the env

            #: Where .so files go
            dst = abspath('{ndk_build_dir}/{arch}'.format(**cfg))

            #: Collect all .so files to the lib dir
            with cd('{conda_prefix}/android/'
                    '{local_arch}/lib/'.format(**cfg)):
                for lib in glob('*.so'):
                    #: Skip libs matching any user excluded pattern
                    excluded = [p for p in env.get('excluded', [])
                                if fnmatch.fnmatch(lib, p)]
                    if excluded:
                        continue
                    shutil.copy(lib, dst)
class BundleAssets(Command):
    """ This is used by the gradle build to pack python into a zip.

    Builds native libs (android) or collects dylibs (ios), stages the
    env's python plus the app sources into a build dir, optionally
    compiles to pyc and strips excluded patterns, then tars the result
    into the platform's assets folder.
    """
    title = set_default("bundle-assets")
    help = set_default("Creates a python bundle of all .py and .enaml files")
    args = set_default([
        ('target', dict(nargs='?', default="android",
                        help="Build for the given target (android, iphoneos, iphonesimulator)")),
        ('--release', dict(action='store_true', help="Create a release bundle")),
        ('--no-compile', dict(action='store_true', help="Don't generate python cache")),
    ])

    def run(self, args=None):
        ctx = self.ctx
        if args.target not in ['android', 'iphoneos', 'iphonesimulator']:
            raise ValueError("Target must be either android, iphoneos, or iphonesimulator")
        if args.target == 'android':
            env = ctx['android']
        else:
            env = ctx['ios']

        #: Now copy to android assets folder
        #: Extracted file type
        bundle = 'python.tar.gz'
        root = abspath(os.getcwd())

        # Run lib build
        if args.target == 'android':
            #: Um, we're passing args from another command?
            self.cmds['ndk-build'].run(args)
        else:
            #: Collect all .so files to the lib dir
            with cd('{conda_prefix}/{target}/lib/'.format(target=args.target,
                                                          **env)):
                dst = '{root}/ios/Libs'.format(root=root)
                if exists(dst):
                    shutil.rmtree(dst)
                os.makedirs(dst)

                # Copy all libs to the
                for lib in glob('*.dylib'):
                    excluded = [p for p in env.get('excluded', [])
                                if fnmatch.fnmatch(lib, p)]
                    if excluded:
                        continue
                    shutil.copy(lib, dst)

        # Clean each arch
        #: Remove old
        cfg = dict(bundle_id=ctx['bundle_id'])
        if args.target == 'android':
            #: NOTE(review): only the FIRST android target is used here
            #: (the loop breaks immediately) — confirm intent
            for arch in env['targets']:
                cfg.update(dict(
                    target='android/{}'.format(arch),
                    local_arch=arch,
                    arch=ANDROID_TARGETS[arch]
                ))
                break
        else:
            cfg['target'] = args.target
        cfg.update(env)

        #: Create
        if not os.path.exists(env['python_build_dir']):
            os.makedirs(env['python_build_dir'].format(**cfg))
            # raise RuntimeError(
            #     "Error: Python build doesn't exist. "
            #     "You should run './enaml-native build-python' first!")

        with cd(env['python_build_dir']):
            #: Remove old build
            if os.path.exists('build'):
                shutil.rmtree('build')

            #: Copy python/ build/
            cp('{conda_prefix}/{target}/python/'.format(**cfg),
               '{python_build_dir}/build'.format(**cfg))

            #: Copy sources from app source
            for src in ctx.get('sources', ['src']):
                cp(join(root, src), 'build')

            #: Clean any excluded sources
            with cd('build'):
                if not args.no_compile:
                    # Compile to pyc
                    compileall.compile_dir('.')

                    # Remove all py files (keep only the compiled cache)
                    for dp, dn, fn in os.walk('.'):
                        for f in glob(join(dp, '*.py')):
                            if exists(f+'c') or exists(f+'o'):
                                os.remove(f)

                # Exclude all py files and any user added patterns
                for pattern in env.get('excluded', [])+['*.dist-info',
                                                        '*.egg-info']:
                    matches = glob(pattern)
                    for m in matches:
                        if os.path.isdir(m):
                            shutil.rmtree(m)
                        else:
                            os.remove(m)

            #: Remove old
            for ext in ['.zip', '.tar.lz4', '.so', '.tar.gz']:
                if exists('python.{}'.format(ext)):
                    os.remove('python.{}'.format(ext))

            #: Zip everything and copy to assets arch to build
            with cd('build'):
                print(Colors.CYAN+"[DEBUG] Creating python bundle..." +
                      Colors.RESET)
                with tarfile.open('../'+bundle, "w:gz") as tar:
                    tar.add('.')
                #shprint(sh.zip, '-r',
                #        'android/app/src/main/assets/python/python.zip', '.')
                #shprint(sh.zip, '-r', '../python.zip', '.')
                #shprint(sh.tar, '-zcvf', '../python.tar.gz', '.')
                #shprint(sh.bash, '-c',
                #        'tar czf - build | lz4 -9 - python.tar.lz4')

            # import msgpack
            # import lz4
            # import lz4.frame
            # with open('../libpybundle.so', 'wb') as source:
            #     data = {}
            #     for root, dirs, files in os.walk("."):
            #         for file in files:
            #             path = join(root, file)[2:]  # Skip ./
            #
            #             # TODO Compile to pyc here
            #             with open(path, 'rb') as f:
            #                 data[path] = f.read()
            #     for k in data.keys():
            #         print(k)
            #     msgpack.pack(data, source)
            #     # Compress with lz4
            #     MINHC = lz4.frame.COMPRESSIONLEVEL_MINHC
            #     with lz4.frame.open('../libpybundle.lz4', 'wb',
            #                         compression_level=MINHC) as f:
            #         f.write(msgpack.packb(data))

        # Copy to each lib dir
        #for arch in env['targets']:
        #    env['abi'] = ANDROID_TARGETS[arch]
        #    src = '{python_build_dir}/libpybundle.so'.format(**env)
        #    dst = '{conda_prefix}/android/enaml-native/src/main/libs/{abi}/'.format(**env)
        #    print("Copying bundle to {}...".format(dst))
        #    shutil.copy(src, dst)

        # Copy to Android assets
        if args.target == 'android':
            cp('{python_build_dir}/{bundle}'.format(bundle=bundle, **env),
               'android/app/src/main/assets/python/{bundle}'.format(
                   bundle=bundle))

        # Copy to iOS assets
        else:
            # TODO Use the bundle!
            cp('{python_build_dir}/build'.format(bundle=bundle, **env),
               'ios/assets/python'.format(bundle=bundle))
            #cp('{python_build_dir}/{bundle}'.format(bundle=bundle, **env),
            #   'ios/app/src/main/assets/python/{bundle}'.format(bundle=bundle))

        print(Colors.GREEN+"[INFO] Python bundled successfully!"+Colors.RESET)
class ListPackages(Command):
    """ Show packages installed in the active env by delegating to conda. """
    title = set_default("list")
    help = set_default("List installed packages (alias to conda list)")

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        #: Pure pass-through to `conda list`
        shprint(self.cli.conda, 'list')
class Install(Command):
    """ The "Install" command does a `conda install` of the package names
    given and then runs the linker command.
    """
    title = set_default("install")
    help = set_default("Install and link an enaml-native package")
    args = set_default([
        ('args', dict(nargs=REMAINDER, help="Alias to conda install")),
    ])

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args):
        #: Refuse to install into the root env
        if os.environ.get('CONDA_DEFAULT_ENV') in [None, 'root']:
            #: BUGFIX: implicit string concatenation was missing the
            #: space between "used" and "within"
            print(Colors.RED+'enaml-native install should only be used '
                             'within an app env!'+Colors.RESET)
            raise SystemExit(0)
        shprint(self.cli.conda, 'install', '-y', *args.args)

        #: Link everything for now
        self.cmds['link'].run()
class Uninstall(Command):
    """ The "Uninstall" command unlinks the package (if needed) and does a
    `conda uninstall` of the package names given.
    """
    title = set_default("uninstall")
    help = set_default("Uninstall and unlink enaml-native package")
    args = set_default([
        ('args', dict(help="Args to conda uninstall", nargs=REMAINDER)),
    ])

    #: Can be run from anywhere (env membership is checked at runtime)
    app_dir_required = set_default(False)

    def run(self, args):
        """ Unlink the packages (when possible) and then uninstall them. """
        if os.environ.get('CONDA_DEFAULT_ENV') in [None, 'root']:
            # Fix: the original message was missing a space between the
            # two string fragments ("...usedwithin...")
            print(Colors.RED + 'enaml-native uninstall should only be used '
                               'within an app env!' + Colors.RESET)
            raise SystemExit(0)
        #: Unlink first
        #: NOTE(review): this parser defines 'args', not 'names', so this
        #: branch appears never to fire -- TODO confirm intended behavior
        if hasattr(args, 'names'):
            # TODO...
            self.cmds['unlink'].run(args)
        shprint(self.cli.conda, 'uninstall', '-y', *args.args)
class Link(Command):
    """ The "Link" command tries to modify the android and ios projects
    to include all of the necessary changes for this package to work.

    A custom linker can be used by adding a "enaml_native_linker"
    entry_point which shall be a function that receives the app
    package.json (context) as an argument.

    Example
    -------

    def linker(ctx):
        # Link android and ios projects here
        # Return True to tell the cli the linking was handled
        return True

    """
    title = set_default("link")
    help = set_default("Link an enaml-native package "
                       "(updates android and ios projects)")
    args = set_default([
        ('names', dict(
            help="Package name (optional) If not set links all projects.",
            nargs='*')),
    ])

    #: Where "enaml native packages" are installed within the root
    package_dir = 'venv'

    def run(self, args=None):
        """ Link the named packages, or, when no names are given, every
        package found under each platform sysroot.
        """
        print("Linking {}".format(args.names if args and args.names
                                  else "all packages..."))
        if args and args.names:
            for name in args.names:
                self.link(self.package_dir, name)
        else:
            #: Link everything found under each platform sysroot
            for target in ['android', 'iphoneos', 'iphonesimulator']:
                sysroot = join(self.package_dir, target)
                for path in os.listdir(sysroot):
                    self.link(sysroot, path)

    def link(self, path, pkg):
        """ Link the package in the current directory.

        A package is linked for android if it ships a build.gradle and
        for ios if it ships a Podfile.
        """
        # Check if a custom linker exists to handle linking this package
        #for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
        #    if ep.name.replace("-", '_') == pkg.replace("-", '_'):
        #        linker = ep.load()
        #        print("Custom linker {} found for '{}'. Linking...".format(
        #            linker, pkg))
        #        if linker(self.ctx, path):
        #            return

        #: Use the default builtin linker script
        if exists(join(path, pkg, 'build.gradle')):
            print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
                pkg)+Colors.RESET)
            self.link_android(path, pkg)
        if exists(join(path, pkg, 'Podfile')):
            print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
                pkg)+Colors.RESET)
            self.link_ios(path, pkg)

    @staticmethod
    def is_settings_linked(source, pkg):
        """ Returns true if the "include ':<project>'" line exists in the
        file.
        """
        for line in source.split("\n"):
            if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
                return True
        return False

    @staticmethod
    def is_build_linked(source, pkg):
        """ Returns true if an "api project(':<project>')" or
        "compile project(':<project>')" line exists in the file.
        """
        for line in source.split("\n"):
            if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
                         line):
                return True
        return False

    @staticmethod
    def find_packages(path):
        """ Find all java files matching the "*Package.java" pattern within
        the given enaml package directory relative to the java source path.
        """
        matches = []
        root = join(path, 'src', 'main', 'java')
        for folder, dirnames, filenames in os.walk(root):
            for filename in fnmatch.filter(filenames, '*Package.java'):
                #: Open and make sure it's an EnamlPackage somewhere
                with open(join(folder, filename)) as f:
                    if "implements EnamlPackage" in f.read():
                        # Store the path relative to the java source root
                        package = os.path.relpath(folder, root)
                        matches.append(os.path.join(package, filename))
        return matches

    @staticmethod
    def is_app_linked(source, pkg, java_package):
        """ Returns true if the given java class name appears anywhere in
        the file.
        """
        for line in source.split("\n"):
            if java_package in line:
                return True
        return False

    def link_android(self, path, pkg):
        """ Link's the android project to this library.

        1. Includes this project's directory in the app's
           android/settings.gradle. It adds:

               include ':<project-name>'
               project(':<project-name>').projectDir = new File(
                   rootProject.projectDir, '../packages/<project-name>/android')

        2. Add's this project as a dependency to android/app/build.gradle.
           It adds:

               compile project(':<project-name>')

           to the dependencies.

        3. If present, adds the import and package statement to
           android/app/src/main/java/<bundle/id>/MainApplication.java

        All three files are restored from the originals read below if any
        step raises.
        """
        bundle_id = self.ctx['bundle_id']
        pkg_root = join(path, pkg)  # NOTE(review): unused local

        #: Read the current file contents (also kept for rollback on error)
        with open(join('android', 'settings.gradle')) as f:
            settings_gradle = f.read()
        with open(join('android', 'app', 'build.gradle')) as f:
            build_gradle = f.read()

        #: Find the MainApplication.java
        main_app_java_path = join('android', 'app', 'src', 'main', 'java',
                                  join(*bundle_id.split(".")),
                                  'MainApplication.java')
        with open(main_app_java_path) as f:
            main_application_java = f.read()

        try:
            #: Now link all the EnamlPackages we can find in the new "package"
            new_packages = Link.find_packages(join(path, pkg))
            if not new_packages:
                print("[Android] {} No EnamlPackages found to link!".format(
                    pkg))
                return

            #: Link settings.gradle
            if not Link.is_settings_linked(settings_gradle, pkg):
                #: Add two statements
                new_settings = settings_gradle.split("\n")
                new_settings.append("")  # Blank line
                new_settings.append("include ':{name}'".format(name=pkg))
                new_settings.append("project(':{name}').projectDir = "
                                    "new File(rootProject.projectDir, "
                                    "'../{path}/android/{name}')"
                                    .format(name=pkg, path=self.package_dir))
                with open(join('android', 'settings.gradle'), 'w') as f:
                    f.write("\n".join(new_settings))
                print("[Android] {} linked in settings.gradle!".format(pkg))
            else:
                print("[Android] {} was already linked in "
                      "settings.gradle!".format(pkg))

            #: Link app/build.gradle
            if not Link.is_build_linked(build_gradle, pkg):
                #: Add two statements
                new_build = build_gradle.split("\n")

                #: Find correct line number
                found = False
                for i, line in enumerate(new_build):
                    if re.match(r"dependencies\s*{", line):
                        found = True
                        continue
                    if found and "}" in line:
                        #: Hackish way to find line of the closing bracket
                        #: after the dependencies { block is found
                        break
                if not found:
                    raise ValueError("Unable to find dependencies in "
                                     "{pkg}/app/build.gradle!".format(pkg=pkg))

                #: Insert before the closing bracket (i is the bracket line)
                new_build.insert(i, " api project(':{name}')".format(
                    name=pkg))
                with open(join('android', 'app', 'build.gradle'), 'w') as f:
                    f.write("\n".join(new_build))
                print("[Android] {} linked in app/build.gradle!".format(pkg))
            else:
                print("[Android] {} was already linked in "
                      "app/build.gradle!".format(pkg))

            new_app_java = []
            for package in new_packages:
                #: Add our import statement
                javacls = os.path.splitext(package)[0].replace("/", ".")
                if not Link.is_app_linked(main_application_java, pkg, javacls):
                    #: Reuse previous if available
                    new_app_java = (new_app_java or
                                    main_application_java.split("\n"))

                    #: Find last import statement
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line, "import *;"):
                            j = i
                    new_app_java.insert(j+1, "import {};".format(javacls))

                    #: Add the package statement
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line.strip(), "new *Package()"):
                            j = i
                    if j == 0:
                        raise ValueError("Could not find the correct spot to "
                                         "add package {}".format(javacls))
                    else:
                        #: Add comma to the previous (last) package line
                        new_app_java[j] = new_app_java[j] + ","

                        #: Insert new line
                        new_app_java.insert(j+1, " new {}()"
                                            .format(javacls.split(".")[-1]))
                else:
                    print("[Android] {} was already linked in {}!".format(
                        pkg, main_app_java_path))
            if new_app_java:
                with open(main_app_java_path, 'w') as f:
                    f.write("\n".join(new_app_java))

            print(Colors.GREEN+"[Android] {} linked successfully!".format(
                pkg)+Colors.RESET)
        except Exception as e:
            # NOTE(review): failure message is printed in GREEN -- looks
            # like it should be RED; confirm intent
            print(Colors.GREEN+"[Android] {} Failed to link. "
                  "Reverting due to error: "
                  "{}".format(pkg, e)+Colors.RESET)
            #: Undo any changes
            with open(join('android', 'settings.gradle'), 'w') as f:
                f.write(settings_gradle)
            with open(join('android', 'app', 'build.gradle'), 'w') as f:
                f.write(build_gradle)
            with open(main_app_java_path, 'w') as f:
                f.write(main_application_java)
            #: Now blow up
            raise

    def link_ios(self, path, pkg):
        # iOS linking is not implemented yet
        print("[iOS] Link TODO:...")
class Unlink(Command):
    """ The "Unlink" command tries to undo the modifications done by the
    linker.

    A custom unlinker can be used by adding a "enaml_native_unlinker"
    entry_point which shall be a function that receives the app
    package.json (context) as an argument.

    Example
    -------

    def unlinker(ctx):
        # Unlink android and ios projects here
        # Return True to tell the cli the unlinking was handled
        return True

    """
    title = set_default("unlink")
    help = set_default("Unlink an enaml-native package")
    args = set_default([
        ('names', dict(help="Package name", nargs="+")),
    ])

    def run(self, args=None):
        """ The name IS required here. """
        print(Colors.BLUE+"[INFO] Unlinking {}...".format(
            args.names)+Colors.RESET)
        for name in args.names:
            self.unlink(Link.package_dir, name)

    def unlink(self, path, pkg):
        """ Unlink the package in the current directory.
        """
        #: Check if a custom unlinker exists to handle unlinking this package
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_unlinker"):
            if ep.name.replace("-", '_') == pkg.replace("-", '_'):
                unlinker = ep.load()
                print("Custom unlinker {} found for '{}'. "
                      "Unlinking...".format(unlinker, pkg))
                if unlinker(self.ctx, path):
                    return
        if exists(join(path, 'android', pkg, 'build.gradle')):
            print("[Android] unlinking {}".format(pkg))
            self.unlink_android(path, pkg)
        for target in ['iphoneos', 'iphonesimulator']:
            if exists(join(path, target, pkg, 'Podfile')):
                print("[iOS] unlinking {}".format(pkg))
                #: NOTE(review): unlink_ios is not defined on this class --
                #: this branch would raise AttributeError; confirm
                self.unlink_ios(path, pkg)

    def unlink_android(self, path, pkg):
        """ Unlink's the android project to this library.

        1. In the app's android/settings.gradle, it removes the following
           lines (if they exist):

               include ':<project-name>'
               project(':<project-name>').projectDir = new File(
                   rootProject.projectDir,
                   '../venv/packages/<project-name>/android')

        2. In the app's android/app/build.gradle, it removes the following
           line (if present):

               compile project(':<project-name>')

        3. In the app's
           android/app/src/main/java/<bundle/id>/MainApplication.java,
           it removes:

               import <package>.<Name>Package;
               new <Name>Package(),

           If no comma exists it will remove the comma from the previous
           line.

        All three files are restored from the originals read below if any
        step raises.
        """
        bundle_id = self.ctx['bundle_id']

        #: Read the current file contents (also kept for rollback on error)
        with open(join('android', 'settings.gradle')) as f:
            settings_gradle = f.read()
        with open(join('android', 'app', 'build.gradle')) as f:
            build_gradle = f.read()

        #: Find the MainApplication.java
        main_app_java_path = join('android', 'app', 'src', 'main', 'java',
                                  join(*bundle_id.split(".")),
                                  'MainApplication.java')
        with open(main_app_java_path) as f:
            main_application_java = f.read()

        try:
            #: Now find all the EnamlPackages in the package being removed
            new_packages = Link.find_packages(join(path, 'android', pkg))
            if not new_packages:
                print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
                      "unlink!".format(pkg)+Colors.RESET)
                return

            #: Unlink settings.gradle
            if Link.is_settings_linked(settings_gradle, pkg):
                #: Remove the two statements
                new_settings = [
                    line for line in settings_gradle.split("\n")
                    if line.strip() not in [
                        "include ':{name}'".format(name=pkg),
                        "project(':{name}').projectDir = "
                        "new File(rootProject.projectDir, "
                        "'../{path}/android/{name}')".format(path=path,
                                                             name=pkg)
                    ]
                ]
                with open(join('android', 'settings.gradle'), 'w') as f:
                    f.write("\n".join(new_settings))
                print("\t[Android] {} unlinked settings.gradle!".format(pkg))
            else:
                print("\t[Android] {} was not linked in "
                      "settings.gradle!".format(pkg))

            #: Unlink app/build.gradle
            if Link.is_build_linked(build_gradle, pkg):
                #: Drop the dependency line (either api or compile form)
                new_build = [
                    line for line in build_gradle.split("\n")
                    if line.strip() not in [
                        "compile project(':{name}')".format(name=pkg),
                        "api project(':{name}')".format(name=pkg),
                    ]
                ]
                with open(join('android', 'app', 'build.gradle'), 'w') as f:
                    f.write("\n".join(new_build))
                print("\t[Android] {} unlinked in "
                      "app/build.gradle!".format(pkg))
            else:
                print("\t[Android] {} was not linked in "
                      "app/build.gradle!".format(pkg))

            new_app_java = []
            for package in new_packages:
                #: Remove our import and package statements
                javacls = os.path.splitext(package)[0].replace("/", ".")
                if Link.is_app_linked(main_application_java, pkg, javacls):
                    #: Reuse previous if available
                    new_app_java = (new_app_java or
                                    main_application_java.split("\n"))
                    new_app_java = [
                        line for line in new_app_java
                        if line.strip() not in [
                            "import {};".format(javacls),
                            "new {}()".format(javacls.split(".")[-1]),
                            "new {}(),".format(javacls.split(".")[-1]),
                        ]
                    ]

                    #: Now find the last package and remove the comma if it
                    #: exists
                    found = False
                    j = 0
                    for i, line in enumerate(new_app_java):
                        if fnmatch.fnmatch(line.strip(), "new *Package()"):
                            found = True
                        elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
                            j = i
                    #: We removed the last package so strip the trailing comma
                    if not found:
                        #: This kills any whitespace...
                        new_app_java[j] = new_app_java[j][
                            :new_app_java[j].rfind(',')]
                else:
                    print("\t[Android] {} was not linked in {}!".format(
                        pkg, main_app_java_path))
            if new_app_java:
                with open(main_app_java_path, 'w') as f:
                    f.write("\n".join(new_app_java))

            print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
                pkg)+Colors.RESET)
        except Exception as e:
            print(Colors.RED+"\t[Android] {} Failed to unlink. "
                  "Reverting due to error: {}".format(pkg, e)+Colors.RESET)
            #: Undo any changes
            with open(join('android', 'settings.gradle'), 'w') as f:
                f.write(settings_gradle)
            with open(join('android', 'app', 'build.gradle'), 'w') as f:
                f.write(build_gradle)
            with open(main_app_java_path, 'w') as f:
                f.write(main_application_java)
            #: Now blow up
            raise
class BuildAndroid(Command):
    """ Build the android project with gradle. """
    title = set_default("build-android")
    help = set_default("Build android project")
    args = set_default([
        ('--release', dict(action='store_true', help="Release mode")),
        ('extra', dict(nargs=REMAINDER, help="Args to pass to gradle")),
    ])

    def run(self, args=None):
        """ Run gradle's assembleRelease or assembleDebug task in the
        android directory.

        Fix: the original raised AttributeError when called with args=None
        because the debug branch unconditionally read `args.extra`.
        """
        extra = args.extra if args else []
        with cd("android"):
            gradlew = sh.Command('gradlew.bat' if IS_WIN else './gradlew')
            task = 'assembleRelease' if args and args.release \
                else 'assembleDebug'
            shprint(gradlew, task, *extra, _debug=True)
class CleanAndroid(Command):
    """ Remove android build artifacts by running gradle's clean task. """
    title = set_default("clean-android")
    help = set_default("Clean the android project")

    def run(self, args=None):
        """ Run `gradlew clean` from within the android directory. """
        wrapper_name = 'gradlew.bat' if IS_WIN else './gradlew'
        with cd('android'):
            gradle = sh.Command(wrapper_name)
            shprint(gradle, 'clean', _debug=True)
class RunAndroid(Command):
    """ Build the android app, install it on a device or emulator, and
    launch its MainActivity.
    """
    title = set_default("run-android")
    help = set_default("Build android project, install it, and run")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
        ('extra', dict(nargs=REMAINDER, help="Extra args to pass to gradle")),
    ])

    def run(self, args=None):
        """ Build, install and start the app.

        Fix: the original raised AttributeError when called with args=None
        because the debug branch unconditionally read `args.extra`.
        """
        ctx = self.ctx
        bundle_id = ctx['bundle_id']
        extra = args.extra if args else []
        with cd("android"):
            release_apk = os.path.abspath(join(
                '.', 'app', 'build', 'outputs', 'apk',
                'app-release-unsigned.apk'))
            gradlew = sh.Command('gradlew.bat' if IS_WIN else './gradlew')

            #: If no devices are connected, start the simulator
            #: NOTE(review): this tests the raw byte length of adb output,
            #: not the number of device lines -- looks fragile; confirm
            if len(sh.adb('devices').stdout.strip()) == 1:
                device = sh.emulator('-list-avds').stdout.split("\n")[0]
                shprint(sh.emulator, '-avd', device)
            if args and args.release:
                shprint(gradlew, 'assembleRelease', *extra, _debug=True)
                #shprint(sh.adb,'uninstall','-k','"{}"'.format(bundle_id))
                shprint(sh.adb, 'install', release_apk)
            else:
                shprint(gradlew, 'installDebug', *extra, _debug=True)
            shprint(sh.adb, 'shell', 'am', 'start', '-n',
                    '{bundle_id}/{bundle_id}.MainActivity'.format(
                        bundle_id=bundle_id))
class CleanIOS(Command):
    """ Remove ios build artifacts via xcodebuild. """
    title = set_default("clean-ios")
    help = set_default("Clean the ios project")

    def run(self, args=None):
        """ Run `xcodebuild clean` on App.xcodeproj for all targets. """
        clean_args = ('clean', '-project', 'App.xcodeproj',
                      '-configuration', 'ReleaseAdhoc', '-alltargets')
        with cd('ios'):
            shprint(sh.xcodebuild, *clean_args)
class RunIOS(Command):
    """ Build the ios project with xcodebuild and launch it in the booted
    simulator.
    """
    title = set_default("run-ios")
    help = set_default("Build and run the ios project")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
    ])

    def run(self, args=None):
        """ Locate the xcworkspace, build its scheme and launch the app.

        Fix: removed the unused local `env = ctx['ios']`.
        """
        ctx = self.ctx
        with cd('ios'):
            ws = glob("*.xcworkspace")
            if not ws:
                raise RuntimeError("Couldn't find a xcworkspace in the ios folder! "
                                   "Did you run `pod install`? ")
            workspace = ws[0]
            #: The scheme name is the workspace filename minus its extension
            scheme = '.'.join(workspace.split('.')[0:-1])
            shprint(sh.xcrun, 'xcodebuild',
                    '-scheme', scheme,
                    '-workspace', workspace,
                    '-configuration',
                    'Release' if args and args.release else 'Debug',
                    '-allowProvisioningUpdates',
                    '-derivedDataPath', 'run')
            #shprint(sh.xcrun, 'simctl', 'install', 'booted',
            #        'build/Build/Products/Debug-iphonesimulator/
            #        {project}.app'.format(**env))
            shprint(sh.xcrun, 'simctl', 'launch', 'booted', ctx['bundle_id'])
class BuildIOS(Command):
    """ Build the ios project using xcodebuild. """
    title = set_default("build-ios")
    help = set_default("Build the ios project")
    args = set_default([
        ('--release', dict(action='store_true', help="Build in Release mode")),
    ])

    def run(self, args=None):
        """ Locate the xcworkspace in the ios folder and build its scheme. """
        with cd('ios'):
            workspaces = glob("*.xcworkspace")
            if not workspaces:
                raise RuntimeError("Couldn't find a xcworkspace in the ios folder! "
                                   "Did you run `pod install`? ")
            workspace = workspaces[0]
            #: The scheme is the workspace filename minus its extension
            scheme = '.'.join(workspace.split('.')[0:-1])
            if args and args.release:
                configuration = 'Release'
            else:
                configuration = 'Debug'
            shprint(sh.xcrun,
                    'xcodebuild',
                    '-scheme', scheme,
                    '-workspace', workspace,
                    '-configuration', configuration,
                    '-allowProvisioningUpdates',
                    '-derivedDataPath', 'build')
class Server(Command):
    """ Run a dev server to host files. Only view files can be reloaded at
    the moment.
    """
    title = set_default("start")
    help = set_default("Start a debug server for serving files to the app")

    #: Dev server index page to render
    index_page = Unicode("enaml-native dev server. "
                         "When you change a source file it pushes to the app.")
    args = set_default([
        ('--remote-debugging', dict(action='store_true',
                                    help="Run in remote debugging mode")),
    ])

    #: Server port
    port = Int(8888)

    #: Delay (in the event loop's call_later units, i.e. seconds for
    #: tornado/twisted) to wait before triggering a reload
    reload_delay = Float(1)

    #: Pending reload requests (used to debounce rapid file changes)
    _reload_count = Int()

    #: Watchdog observer
    observer = Instance(object)

    #: Watchdog handler
    watcher = Instance(object)

    #: Websocket handler implementation
    handlers = List()

    #: Callable to add a callback from a thread into the event loop
    add_callback = Callable()

    #: Callable to add a callback at some later time
    call_later = Callable()

    #: Changed file events
    changes = List()

    #: Run in bridge (forwarding) mode for remote debugging
    remote_debugging = Bool()

    #: Can be run from anywhere
    app_dir_required = set_default(False)

    def run(self, args=None):
        """ Start either the tornado or twisted server (picked from the app
        dependencies) and, unless remote debugging, watch src/ for changes.
        """
        ctx = self.ctx

        #: Look for tornado or twisted in reqs
        use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))

        #: Save setting
        self.remote_debugging = args and args.remote_debugging

        if self.remote_debugging:
            #: Do reverse forwarding so you can use remote-debugging over
            #: adb (via USB even if Wifi is not accessible)
            shprint(sh.adb, 'reverse',
                    'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
        else:
            #: Setup observer
            try:
                from watchdog.observers import Observer
                from watchdog.events import LoggingEventHandler
            except ImportError:
                print(Colors.RED + "[WARNING] Watchdog is required the dev "
                      "server: Run 'pip install watchdog'" + Colors.RESET)
                return
            self.observer = Observer()
            server = self

            class AppNotifier(LoggingEventHandler):
                #: Watchdog fires on a worker thread; push the event into
                #: the event loop thread
                def on_any_event(self, event):
                    super(AppNotifier, self).on_any_event(event)
                    #: Use add callback to push to event loop thread
                    server.add_callback(server.on_file_changed, event)

        with cd('src'):
            if not self.remote_debugging:
                print("Watching {}".format(abspath('.')))
                self.watcher = AppNotifier()
                self.observer.schedule(self.watcher, abspath('.'),
                                       recursive=True)
                self.observer.start()
            if use_twisted:
                self.run_twisted(args)
            else:
                self.run_tornado(args)

    def run_tornado(self, args):
        """ Tornado dev server implementation """
        server = self
        import tornado.ioloop
        import tornado.web
        import tornado.websocket
        ioloop = tornado.ioloop.IOLoop.current()

        class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
            #: Thin adapter delegating websocket events to the server
            def open(self):
                super(DevWebSocketHandler, self).open()
                server.on_open(self)

            def on_message(self, message):
                server.on_message(self, message)

            def on_close(self):
                super(DevWebSocketHandler, self).on_close()
                server.on_close(self)

        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write(server.index_page)

        #: Set the call later method
        server.call_later = ioloop.call_later
        server.add_callback = ioloop.add_callback

        app = tornado.web.Application([
            (r"/", MainHandler),
            (r"/dev", DevWebSocketHandler),
        ])
        app.listen(self.port)
        print("Tornado Dev server started on {}".format(self.port))
        ioloop.start()

    def run_twisted(self, args):
        """ Twisted dev server implementation """
        server = self
        from twisted.internet import reactor
        from twisted.web import resource
        from twisted.web.static import File
        from twisted.web.server import Site
        from autobahn.twisted.websocket import (WebSocketServerFactory,
                                                WebSocketServerProtocol)
        from autobahn.twisted.resource import WebSocketResource

        class DevWebSocketHandler(WebSocketServerProtocol):
            #: Thin adapter delegating websocket events to the server
            def onConnect(self, request):
                super(DevWebSocketHandler, self).onConnect(request)
                server.on_open(self)

            def onMessage(self, payload, isBinary):
                server.on_message(self, payload)

            def onClose(self, wasClean, code, reason):
                super(DevWebSocketHandler, self).onClose(wasClean, code,
                                                         reason)
                server.on_close(self)

            #: Provide the tornado-style API the shared code expects
            def write_message(self, message, binary=False):
                self.sendMessage(message, binary)

        #: Set the call later method
        server.call_later = reactor.callLater
        server.add_callback = reactor.callFromThread

        factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
        factory.protocol = DevWebSocketHandler

        class MainHandler(resource.Resource):
            def render_GET(self, req):
                return str(server.index_page)

        root = resource.Resource()
        root.putChild("", MainHandler())
        root.putChild("dev", WebSocketResource(factory))
        reactor.listenTCP(self.port, Site(root))
        print("Twisted Dev server started on {}".format(self.port))
        reactor.run()

    #: ========================================================
    #: Shared protocol implementation
    #: ========================================================
    def on_open(self, handler):
        # A fresh client resets the pending reload counter
        self._reload_count = 0
        print("Client {} connected!".format(handler))
        self.handlers.append(handler)

    def on_message(self, handler, msg):
        """ In remote debugging mode this simply acts as a forwarding
        proxy for the two clients.
        """
        if self.remote_debugging:
            #: Forward to other clients
            for h in self.handlers:
                if h != handler:
                    h.write_message(msg, True)
        else:
            print(msg)

    def send_message(self, msg):
        """ Send a message to the client. This should not be used in
        remote debugging mode.
        """
        if not self.handlers:
            return  #: Client not connected
        for h in self.handlers:
            h.write_message(msg)

    def on_close(self, handler):
        print("Client {} left!".format(handler))
        self.handlers.remove(handler)

    def on_file_changed(self, event):
        """ Queue the change and schedule a debounced reload check. """
        print(event)
        self._reload_count += 1
        self.changes.append(event)
        self.call_later(self.reload_delay, self._trigger_reload, event)

    def _trigger_reload(self, event):
        # Only fire once the last scheduled check runs (debounce)
        self._reload_count -= 1
        if self._reload_count == 0:
            files = {}
            for event in self.changes:
                path = os.path.relpath(event.src_path, os.getcwd())
                # Only python and enaml sources are pushed to the app
                if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
                    continue
                with open(event.src_path) as f:
                    data = f.read()

                #: Add to changed files
                files[path] = data

            if files:
                #: Send the reload request
                msg = {
                    'type': 'reload',
                    'files': files
                }
                print("Reloading: {}".format(files.keys()))
                self.send_message(json.dumps(msg))

            #: Clear changes
            self.changes = []
class EnamlNativeCli(Atom):
    """ The enaml-native command line tool.

    Discovers all Command subclasses (plus any installed via the
    "enaml_native_command" entry point), builds an argparse parser from
    them, loads the app context from the package file, and dispatches the
    parsed command.
    """
    #: Root parser
    parser = Instance(ArgumentParser)

    #: App context loaded from the package file
    ctx = Dict()

    #: Parsed args
    args = Instance(Namespace)

    #: Location of package file
    package = Unicode("environment.yml")

    #: If enaml-native is being run within an app directory
    in_app_directory = Bool()

    #: Conda command
    conda = Instance(sh.Command)

    #: Commands
    commands = List(Command)

    def _default_commands(self):
        """ Build the list of CLI commands by finding subclasses of the
        Command class.

        Also allows commands to be installed using the
        "enaml_native_command" entry point. This entry point should return
        a Command subclass.
        """
        commands = [c() for c in find_commands(Command)]

        #: Get commands installed via entry points
        for ep in pkg_resources.iter_entry_points(
                group="enaml_native_command"):
            c = ep.load()
            if not issubclass(c, Command):
                print("Warning: entry point {} did not return a valid enaml "
                      "cli command! This command will be ignored!".format(
                        ep.name))
                #: Fix: actually skip invalid commands as the warning
                #: promises (previously they were appended anyway)
                continue
            commands.append(c())
        return commands

    def _default_in_app_directory(self):
        """ Return whether the current directory contains the package file,
        which indicates we're in the root directory of an enaml-native app.
        """
        return exists(self.package)

    def _default_ctx(self):
        """ Return the package config (context) and normalize some of the
        per-platform values.
        """
        if not self.in_app_directory:
            print("Warning: {} does not exist. Using the default.".format(
                self.package))
            ctx = {}
        else:
            with open(self.package) as f:
                ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))

        if self.in_app_directory:
            # Update the env for each platform
            excluded = list(ctx.get('excluded', []))
            for env in [ctx['ios'], ctx['android']]:
                if 'python_build_dir' not in env:
                    env['python_build_dir'] = expanduser(
                        abspath('build/python'))
                if 'conda_prefix' not in env:
                    env['conda_prefix'] = os.environ.get(
                        'CONDA_PREFIX', expanduser(abspath('venv')))
                # Join the shared and local exclusions
                env['excluded'] = list(env.get('excluded', [])) + excluded
        return ctx

    def _default_parser(self):
        """ Generate an argparse parser with one subparser per command and
        wire each command up to this cli instance.
        """
        parser = ArgumentParser(prog='enaml-native')

        #: Build commands by name
        cmds = {c.title: c for c in self.commands}

        #: Build parser, prepare commands
        subparsers = parser.add_subparsers()
        for c in self.commands:
            p = subparsers.add_parser(c.title, help=c.help)
            c.parser = p
            for (flags, kwargs) in c.args:
                p.add_argument(*flags.split(), **kwargs)
            p.set_defaults(cmd=c)
            c.ctx = self.ctx
            c.cmds = cmds
            c.cli = self
        return parser

    def _default_conda(self):
        return find_conda()

    def check_dependencies(self):
        """ Verify that conda can be invoked.

        Raises
        ------
        EnvironmentError
            If the conda executable cannot be run.
        """
        try:
            self.conda('--version')
        except Exception:
            #: Fix: was a bare `except:` which also swallowed SystemExit
            #: and KeyboardInterrupt. Also fixed the missing space in the
            #: message ("thelocation").
            raise EnvironmentError(
                "conda could not be found. Please install miniconda from "
                "https://conda.io/miniconda.html or set CONDA_HOME to the "
                "location where conda is installed.")

    def start(self):
        """ Parse sys.argv and run the selected command. """
        self.check_dependencies()
        self.args = self.parser.parse_args()

        # Python 3 doesn't set the cmd if no args are given
        if not hasattr(self.args, 'cmd'):
            self.parser.print_help()
            return

        cmd = self.args.cmd
        try:
            if cmd.app_dir_required and not self.in_app_directory:
                raise EnvironmentError(
                    "'enaml-native {}' must be run within an app root "
                    "directory not: {}".format(cmd.title, os.getcwd()))
            cmd.run(self.args)
        except sh.ErrorReturnCode:
            # Re-raise shell failures unchanged so the exit code propagates
            raise
def main():
    """ Entry point: build the CLI and dispatch the requested command. """
    cli = EnamlNativeCli()
    cli.start()


if __name__ == '__main__':
    main()
|
codelv/enaml-native-cli | enamlnativecli/main.py | Link.link | python | def link(self, path, pkg):
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
# if ep.name.replace("-", '_') == pkg.replace("-", '_'):
# linker = ep.load()
# print("Custom linker {} found for '{}'. Linking...".format(
# linker, pkg))
# if linker(self.ctx, path):
# return
#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
pkg)+Colors.RESET)
self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
pkg)+Colors.RESET)
self.link_ios(path, pkg) | Link the package in the current directory. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L826-L846 | null | class Link(Command):
""" The "Link" command tries to modify the android and ios projects
to include all of the necessary changes for this package to work.
A custom linkiner can be used by adding a "enaml_native_linker"
entry_point which shall be a function that receives the app package.json
(context) an argument.
Example
----------
def linker(ctx):
# Link android and ios projects here
return True #: To tell the cli the linking was handled and should
return
"""
title = set_default("link")
help = set_default("Link an enaml-native package "
"(updates android and ios projects)")
args = set_default([
('names', dict(
help="Package name (optional) If not set links all projects.",
nargs='*')),
])
#: Where "enaml native packages" are installed within the root
package_dir = 'venv'
def run(self, args=None):
print("Linking {}".format(args.names if args and args.names
else "all packages..."))
if args and args.names:
for name in args.names:
self.link(self.package_dir, name)
else:
#: Link everything
for target in ['android', 'iphoneos', 'iphonesimulator']:
sysroot = join(self.package_dir, target)
for path in os.listdir(sysroot):
self.link(sysroot, path)
@staticmethod
def is_settings_linked(source, pkg):
""" Returns true if the "include ':<project>'" line exists in the file
"""
for line in source.split("\n"):
if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
return True
return False
@staticmethod
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False
@staticmethod
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches
@staticmethod
def is_app_linked(source, pkg, java_package):
""" Returns true if the compile project line exists exists in the file
"""
for line in source.split("\n"):
if java_package in line:
return True
return False
def link_android(self, path, pkg):
""" Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Add's this project as a dependency to the android/app/build.gradle
It adds:
compile project(':<project-name>')
to the dependencies.
3. If preset, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
"""
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j]+ ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.GREEN+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
def link_ios(self, path, pkg):
print("[iOS] Link TODO:...")
|
codelv/enaml-native-cli | enamlnativecli/main.py | Link.is_settings_linked | python | def is_settings_linked(source, pkg):
for line in source.split("\n"):
if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
return True
return False | Returns true if the "include ':<project>'" line exists in the file | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L849-L855 | null | class Link(Command):
""" The "Link" command tries to modify the android and ios projects
to include all of the necessary changes for this package to work.
A custom linkiner can be used by adding a "enaml_native_linker"
entry_point which shall be a function that receives the app package.json
(context) an argument.
Example
----------
def linker(ctx):
# Link android and ios projects here
return True #: To tell the cli the linking was handled and should
return
"""
title = set_default("link")
help = set_default("Link an enaml-native package "
"(updates android and ios projects)")
args = set_default([
('names', dict(
help="Package name (optional) If not set links all projects.",
nargs='*')),
])
#: Where "enaml native packages" are installed within the root
package_dir = 'venv'
def run(self, args=None):
print("Linking {}".format(args.names if args and args.names
else "all packages..."))
if args and args.names:
for name in args.names:
self.link(self.package_dir, name)
else:
#: Link everything
for target in ['android', 'iphoneos', 'iphonesimulator']:
sysroot = join(self.package_dir, target)
for path in os.listdir(sysroot):
self.link(sysroot, path)
def link(self, path, pkg):
""" Link the package in the current directory.
"""
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
# if ep.name.replace("-", '_') == pkg.replace("-", '_'):
# linker = ep.load()
# print("Custom linker {} found for '{}'. Linking...".format(
# linker, pkg))
# if linker(self.ctx, path):
# return
#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
pkg)+Colors.RESET)
self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
pkg)+Colors.RESET)
self.link_ios(path, pkg)
@staticmethod
@staticmethod
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False
@staticmethod
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches
@staticmethod
def is_app_linked(source, pkg, java_package):
""" Returns true if the compile project line exists exists in the file
"""
for line in source.split("\n"):
if java_package in line:
return True
return False
def link_android(self, path, pkg):
""" Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Add's this project as a dependency to the android/app/build.gradle
It adds:
compile project(':<project-name>')
to the dependencies.
3. If preset, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
"""
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j]+ ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.GREEN+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
def link_ios(self, path, pkg):
print("[iOS] Link TODO:...")
|
codelv/enaml-native-cli | enamlnativecli/main.py | Link.is_build_linked | python | def is_build_linked(source, pkg):
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False | Returns true if the "compile project(':<project>')"
line exists exists in the file | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L858-L865 | null | class Link(Command):
""" The "Link" command tries to modify the android and ios projects
to include all of the necessary changes for this package to work.
A custom linkiner can be used by adding a "enaml_native_linker"
entry_point which shall be a function that receives the app package.json
(context) an argument.
Example
----------
def linker(ctx):
# Link android and ios projects here
return True #: To tell the cli the linking was handled and should
return
"""
title = set_default("link")
help = set_default("Link an enaml-native package "
"(updates android and ios projects)")
args = set_default([
('names', dict(
help="Package name (optional) If not set links all projects.",
nargs='*')),
])
#: Where "enaml native packages" are installed within the root
package_dir = 'venv'
def run(self, args=None):
print("Linking {}".format(args.names if args and args.names
else "all packages..."))
if args and args.names:
for name in args.names:
self.link(self.package_dir, name)
else:
#: Link everything
for target in ['android', 'iphoneos', 'iphonesimulator']:
sysroot = join(self.package_dir, target)
for path in os.listdir(sysroot):
self.link(sysroot, path)
def link(self, path, pkg):
""" Link the package in the current directory.
"""
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
# if ep.name.replace("-", '_') == pkg.replace("-", '_'):
# linker = ep.load()
# print("Custom linker {} found for '{}'. Linking...".format(
# linker, pkg))
# if linker(self.ctx, path):
# return
#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
pkg)+Colors.RESET)
self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
pkg)+Colors.RESET)
self.link_ios(path, pkg)
@staticmethod
def is_settings_linked(source, pkg):
""" Returns true if the "include ':<project>'" line exists in the file
"""
for line in source.split("\n"):
if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
return True
return False
@staticmethod
@staticmethod
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches
@staticmethod
def is_app_linked(source, pkg, java_package):
""" Returns true if the compile project line exists exists in the file
"""
for line in source.split("\n"):
if java_package in line:
return True
return False
def link_android(self, path, pkg):
""" Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Add's this project as a dependency to the android/app/build.gradle
It adds:
compile project(':<project-name>')
to the dependencies.
3. If preset, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
"""
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j]+ ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.GREEN+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
def link_ios(self, path, pkg):
print("[iOS] Link TODO:...")
|
codelv/enaml-native-cli | enamlnativecli/main.py | Link.find_packages | python | def find_packages(path):
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches | Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L868-L881 | null | class Link(Command):
""" The "Link" command tries to modify the android and ios projects
to include all of the necessary changes for this package to work.
A custom linkiner can be used by adding a "enaml_native_linker"
entry_point which shall be a function that receives the app package.json
(context) an argument.
Example
----------
def linker(ctx):
# Link android and ios projects here
return True #: To tell the cli the linking was handled and should
return
"""
title = set_default("link")
help = set_default("Link an enaml-native package "
"(updates android and ios projects)")
args = set_default([
('names', dict(
help="Package name (optional) If not set links all projects.",
nargs='*')),
])
#: Where "enaml native packages" are installed within the root
package_dir = 'venv'
def run(self, args=None):
print("Linking {}".format(args.names if args and args.names
else "all packages..."))
if args and args.names:
for name in args.names:
self.link(self.package_dir, name)
else:
#: Link everything
for target in ['android', 'iphoneos', 'iphonesimulator']:
sysroot = join(self.package_dir, target)
for path in os.listdir(sysroot):
self.link(sysroot, path)
def link(self, path, pkg):
""" Link the package in the current directory.
"""
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
# if ep.name.replace("-", '_') == pkg.replace("-", '_'):
# linker = ep.load()
# print("Custom linker {} found for '{}'. Linking...".format(
# linker, pkg))
# if linker(self.ctx, path):
# return
#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
pkg)+Colors.RESET)
self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
pkg)+Colors.RESET)
self.link_ios(path, pkg)
@staticmethod
def is_settings_linked(source, pkg):
""" Returns true if the "include ':<project>'" line exists in the file
"""
for line in source.split("\n"):
if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
return True
return False
@staticmethod
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False
@staticmethod
@staticmethod
def is_app_linked(source, pkg, java_package):
""" Returns true if the compile project line exists exists in the file
"""
for line in source.split("\n"):
if java_package in line:
return True
return False
def link_android(self, path, pkg):
""" Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Add's this project as a dependency to the android/app/build.gradle
It adds:
compile project(':<project-name>')
to the dependencies.
3. If preset, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
"""
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j]+ ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.GREEN+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
def link_ios(self, path, pkg):
print("[iOS] Link TODO:...")
|
codelv/enaml-native-cli | enamlnativecli/main.py | Link.is_app_linked | python | def is_app_linked(source, pkg, java_package):
for line in source.split("\n"):
if java_package in line:
return True
return False | Returns true if the compile project line exists exists in the file | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L884-L891 | null | class Link(Command):
""" The "Link" command tries to modify the android and ios projects
to include all of the necessary changes for this package to work.
A custom linkiner can be used by adding a "enaml_native_linker"
entry_point which shall be a function that receives the app package.json
(context) an argument.
Example
----------
def linker(ctx):
# Link android and ios projects here
return True #: To tell the cli the linking was handled and should
return
"""
title = set_default("link")
help = set_default("Link an enaml-native package "
"(updates android and ios projects)")
args = set_default([
('names', dict(
help="Package name (optional) If not set links all projects.",
nargs='*')),
])
#: Where "enaml native packages" are installed within the root
package_dir = 'venv'
def run(self, args=None):
print("Linking {}".format(args.names if args and args.names
else "all packages..."))
if args and args.names:
for name in args.names:
self.link(self.package_dir, name)
else:
#: Link everything
for target in ['android', 'iphoneos', 'iphonesimulator']:
sysroot = join(self.package_dir, target)
for path in os.listdir(sysroot):
self.link(sysroot, path)
def link(self, path, pkg):
""" Link the package in the current directory.
"""
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
# if ep.name.replace("-", '_') == pkg.replace("-", '_'):
# linker = ep.load()
# print("Custom linker {} found for '{}'. Linking...".format(
# linker, pkg))
# if linker(self.ctx, path):
# return
#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
pkg)+Colors.RESET)
self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
pkg)+Colors.RESET)
self.link_ios(path, pkg)
@staticmethod
def is_settings_linked(source, pkg):
""" Returns true if the "include ':<project>'" line exists in the file
"""
for line in source.split("\n"):
if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
return True
return False
@staticmethod
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False
@staticmethod
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches
@staticmethod
def is_app_linked(source, pkg, java_package):
""" Returns true if the compile project line exists exists in the file
"""
for line in source.split("\n"):
if java_package in line:
return True
return False
def link_android(self, path, pkg):
""" Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Add's this project as a dependency to the android/app/build.gradle
It adds:
compile project(':<project-name>')
to the dependencies.
3. If preset, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
"""
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j]+ ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.GREEN+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
def link_ios(self, path, pkg):
print("[iOS] Link TODO:...")
|
codelv/enaml-native-cli | enamlnativecli/main.py | Link.link_android | python | def link_android(self, path, pkg):
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j]+ ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.GREEN+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise | Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Add's this project as a dependency to the android/app/build.gradle
It adds:
compile project(':<project-name>')
to the dependencies.
3. If preset, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L893-L1044 | null | class Link(Command):
""" The "Link" command tries to modify the android and ios projects
to include all of the necessary changes for this package to work.
A custom linkiner can be used by adding a "enaml_native_linker"
entry_point which shall be a function that receives the app package.json
(context) an argument.
Example
----------
def linker(ctx):
# Link android and ios projects here
return True #: To tell the cli the linking was handled and should
return
"""
title = set_default("link")
help = set_default("Link an enaml-native package "
"(updates android and ios projects)")
args = set_default([
('names', dict(
help="Package name (optional) If not set links all projects.",
nargs='*')),
])
#: Where "enaml native packages" are installed within the root
package_dir = 'venv'
def run(self, args=None):
print("Linking {}".format(args.names if args and args.names
else "all packages..."))
if args and args.names:
for name in args.names:
self.link(self.package_dir, name)
else:
#: Link everything
for target in ['android', 'iphoneos', 'iphonesimulator']:
sysroot = join(self.package_dir, target)
for path in os.listdir(sysroot):
self.link(sysroot, path)
def link(self, path, pkg):
""" Link the package in the current directory.
"""
# Check if a custom linker exists to handle linking this package
#for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"):
# if ep.name.replace("-", '_') == pkg.replace("-", '_'):
# linker = ep.load()
# print("Custom linker {} found for '{}'. Linking...".format(
# linker, pkg))
# if linker(self.ctx, path):
# return
#: Use the default builtin linker script
if exists(join(path, pkg, 'build.gradle')):
print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format(
pkg)+Colors.RESET)
self.link_android(path, pkg)
if exists(join(path, pkg, 'Podfile')):
print(Colors.BLUE+"[INFO] Linking {}/Podfile".format(
pkg)+Colors.RESET)
self.link_ios(path, pkg)
@staticmethod
def is_settings_linked(source, pkg):
""" Returns true if the "include ':<project>'" line exists in the file
"""
for line in source.split("\n"):
if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line):
return True
return False
@staticmethod
def is_build_linked(source, pkg):
""" Returns true if the "compile project(':<project>')"
line exists exists in the file """
for line in source.split("\n"):
if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg),
line):
return True
return False
@staticmethod
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches
@staticmethod
def is_app_linked(source, pkg, java_package):
""" Returns true if the compile project line exists exists in the file
"""
for line in source.split("\n"):
if java_package in line:
return True
return False
def link_ios(self, path, pkg):
print("[iOS] Link TODO:...")
|
codelv/enaml-native-cli | enamlnativecli/main.py | Unlink.run | python | def run(self, args=None):
print(Colors.BLUE+"[INFO] Unlinking {}...".format(
args.names)+Colors.RESET)
for name in args.names:
self.unlink(Link.package_dir, name) | The name IS required here. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1073-L1078 | null | class Unlink(Command):
""" The "Unlink" command tries to undo the modifications done by the
linker..
A custom unlinkiner can be used by adding a "enaml_native_unlinker"
entry_point which shall be a function that receives the app
package.json (context) an argument.
Example
----------
def unlinker(ctx):
# Unlink android and ios projects here
return True #: To tell the cli the unlinking was handled and
should return
"""
title = set_default("unlink")
help = set_default("Unlink an enaml-native package")
args = set_default([
('names', dict(help="Package name", nargs="+")),
])
def unlink(self, path, pkg):
""" Unlink the package in the current directory.
"""
#: Check if a custom unlinker exists to handle unlinking this package
for ep in pkg_resources.iter_entry_points(
group="enaml_native_unlinker"):
if ep.name.replace("-", '_') == pkg.replace("-", '_'):
unlinker = ep.load()
print("Custom unlinker {} found for '{}'. "
"Unlinking...".format(unlinker, pkg))
if unlinker(self.ctx, path):
return
if exists(join(path, 'android', pkg, 'build.gradle')):
print("[Android] unlinking {}".format(pkg))
self.unlink_android(path, pkg)
for target in ['iphoneos', 'iphonesimulator']:
if exists(join(path, target, pkg, 'Podfile')):
print("[iOS] unlinking {}".format(pkg))
self.unlink_ios(path, pkg)
def unlink_android(self, path, pkg):
""" Unlink's the android project to this library.
1. In the app's android/settings.gradle, it removes the following
lines (if they exist):
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir,
'../venv/packages/<project-name>/android')
2. In the app's android/app/build.gradle, it removes the following
line (if present)
compile project(':<project-name>')
3. In the app's
android/app/src/main/java/<bundle/id>/MainApplication.java,
it removes:
import <package>.<Name>Package;
new <Name>Package(),
If no comma exists it will remove the comma from the previous
line.
"""
bundle_id = self.ctx['bundle_id']
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, 'android', pkg))
if not new_packages:
print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
"unlink!".format(pkg)+Colors.RESET)
return
#: Unlink settings.gradle
if Link.is_settings_linked(settings_gradle, pkg):
#: Remove the two statements
new_settings = [
line for line in settings_gradle.split("\n")
if line.strip() not in [
"include ':{name}'".format(name=pkg),
"project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')".format(path=path,
name=pkg)
]
]
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("\t[Android] {} unlinked settings.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"settings.gradle!".format(pkg))
#: Unlink app/build.gradle
if Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = [
line for line in build_gradle.split("\n")
if line.strip() not in [
"compile project(':{name}')".format(name=pkg),
"api project(':{name}')".format(name=pkg),
]
]
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("\t[Android] {} unlinked in "
"app/build.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
new_app_java = [
line for line in new_app_java
if line.strip() not in [
"import {};".format(javacls),
"new {}()".format(javacls.split(".")[-1]),
"new {}(),".format(javacls.split(".")[-1]),
]
]
#: Now find the last package and remove the comma if it
#: exists
found = False
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
found = True
elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
j = i
#: We removed the last package so add a comma
if not found:
#: This kills any whitespace...
new_app_java[j] = new_app_java[j][
:new_app_java[j].rfind(',')]
else:
print("\t[Android] {} was not linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.RED+"\t[Android] {} Failed to unlink. "
"Reverting due to error: {}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
|
codelv/enaml-native-cli | enamlnativecli/main.py | Unlink.unlink | python | def unlink(self, path, pkg):
#: Check if a custom unlinker exists to handle unlinking this package
for ep in pkg_resources.iter_entry_points(
group="enaml_native_unlinker"):
if ep.name.replace("-", '_') == pkg.replace("-", '_'):
unlinker = ep.load()
print("Custom unlinker {} found for '{}'. "
"Unlinking...".format(unlinker, pkg))
if unlinker(self.ctx, path):
return
if exists(join(path, 'android', pkg, 'build.gradle')):
print("[Android] unlinking {}".format(pkg))
self.unlink_android(path, pkg)
for target in ['iphoneos', 'iphonesimulator']:
if exists(join(path, target, pkg, 'Podfile')):
print("[iOS] unlinking {}".format(pkg))
self.unlink_ios(path, pkg) | Unlink the package in the current directory. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1080-L1100 | null | class Unlink(Command):
""" The "Unlink" command tries to undo the modifications done by the
linker..
A custom unlinkiner can be used by adding a "enaml_native_unlinker"
entry_point which shall be a function that receives the app
package.json (context) an argument.
Example
----------
def unlinker(ctx):
# Unlink android and ios projects here
return True #: To tell the cli the unlinking was handled and
should return
"""
title = set_default("unlink")
help = set_default("Unlink an enaml-native package")
args = set_default([
('names', dict(help="Package name", nargs="+")),
])
def run(self, args=None):
""" The name IS required here. """
print(Colors.BLUE+"[INFO] Unlinking {}...".format(
args.names)+Colors.RESET)
for name in args.names:
self.unlink(Link.package_dir, name)
def unlink_android(self, path, pkg):
""" Unlink's the android project to this library.
1. In the app's android/settings.gradle, it removes the following
lines (if they exist):
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir,
'../venv/packages/<project-name>/android')
2. In the app's android/app/build.gradle, it removes the following
line (if present)
compile project(':<project-name>')
3. In the app's
android/app/src/main/java/<bundle/id>/MainApplication.java,
it removes:
import <package>.<Name>Package;
new <Name>Package(),
If no comma exists it will remove the comma from the previous
line.
"""
bundle_id = self.ctx['bundle_id']
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, 'android', pkg))
if not new_packages:
print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
"unlink!".format(pkg)+Colors.RESET)
return
#: Unlink settings.gradle
if Link.is_settings_linked(settings_gradle, pkg):
#: Remove the two statements
new_settings = [
line for line in settings_gradle.split("\n")
if line.strip() not in [
"include ':{name}'".format(name=pkg),
"project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')".format(path=path,
name=pkg)
]
]
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("\t[Android] {} unlinked settings.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"settings.gradle!".format(pkg))
#: Unlink app/build.gradle
if Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = [
line for line in build_gradle.split("\n")
if line.strip() not in [
"compile project(':{name}')".format(name=pkg),
"api project(':{name}')".format(name=pkg),
]
]
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("\t[Android] {} unlinked in "
"app/build.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
new_app_java = [
line for line in new_app_java
if line.strip() not in [
"import {};".format(javacls),
"new {}()".format(javacls.split(".")[-1]),
"new {}(),".format(javacls.split(".")[-1]),
]
]
#: Now find the last package and remove the comma if it
#: exists
found = False
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
found = True
elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
j = i
#: We removed the last package so add a comma
if not found:
#: This kills any whitespace...
new_app_java[j] = new_app_java[j][
:new_app_java[j].rfind(',')]
else:
print("\t[Android] {} was not linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.RED+"\t[Android] {} Failed to unlink. "
"Reverting due to error: {}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
|
codelv/enaml-native-cli | enamlnativecli/main.py | Unlink.unlink_android | python | def unlink_android(self, path, pkg):
bundle_id = self.ctx['bundle_id']
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, 'android', pkg))
if not new_packages:
print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
"unlink!".format(pkg)+Colors.RESET)
return
#: Unlink settings.gradle
if Link.is_settings_linked(settings_gradle, pkg):
#: Remove the two statements
new_settings = [
line for line in settings_gradle.split("\n")
if line.strip() not in [
"include ':{name}'".format(name=pkg),
"project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')".format(path=path,
name=pkg)
]
]
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("\t[Android] {} unlinked settings.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"settings.gradle!".format(pkg))
#: Unlink app/build.gradle
if Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = [
line for line in build_gradle.split("\n")
if line.strip() not in [
"compile project(':{name}')".format(name=pkg),
"api project(':{name}')".format(name=pkg),
]
]
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("\t[Android] {} unlinked in "
"app/build.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
new_app_java = [
line for line in new_app_java
if line.strip() not in [
"import {};".format(javacls),
"new {}()".format(javacls.split(".")[-1]),
"new {}(),".format(javacls.split(".")[-1]),
]
]
#: Now find the last package and remove the comma if it
#: exists
found = False
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
found = True
elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
j = i
#: We removed the last package so add a comma
if not found:
#: This kills any whitespace...
new_app_java[j] = new_app_java[j][
:new_app_java[j].rfind(',')]
else:
print("\t[Android] {} was not linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.RED+"\t[Android] {} Failed to unlink. "
"Reverting due to error: {}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise | Unlink's the android project to this library.
1. In the app's android/settings.gradle, it removes the following
lines (if they exist):
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir,
'../venv/packages/<project-name>/android')
2. In the app's android/app/build.gradle, it removes the following
line (if present)
compile project(':<project-name>')
3. In the app's
android/app/src/main/java/<bundle/id>/MainApplication.java,
it removes:
import <package>.<Name>Package;
new <Name>Package(),
If no comma exists it will remove the comma from the previous
line. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1102-L1249 | null | class Unlink(Command):
""" The "Unlink" command tries to undo the modifications done by the
linker..
A custom unlinkiner can be used by adding a "enaml_native_unlinker"
entry_point which shall be a function that receives the app
package.json (context) an argument.
Example
----------
def unlinker(ctx):
# Unlink android and ios projects here
return True #: To tell the cli the unlinking was handled and
should return
"""
title = set_default("unlink")
help = set_default("Unlink an enaml-native package")
args = set_default([
('names', dict(help="Package name", nargs="+")),
])
def run(self, args=None):
""" The name IS required here. """
print(Colors.BLUE+"[INFO] Unlinking {}...".format(
args.names)+Colors.RESET)
for name in args.names:
self.unlink(Link.package_dir, name)
def unlink(self, path, pkg):
""" Unlink the package in the current directory.
"""
#: Check if a custom unlinker exists to handle unlinking this package
for ep in pkg_resources.iter_entry_points(
group="enaml_native_unlinker"):
if ep.name.replace("-", '_') == pkg.replace("-", '_'):
unlinker = ep.load()
print("Custom unlinker {} found for '{}'. "
"Unlinking...".format(unlinker, pkg))
if unlinker(self.ctx, path):
return
if exists(join(path, 'android', pkg, 'build.gradle')):
print("[Android] unlinking {}".format(pkg))
self.unlink_android(path, pkg)
for target in ['iphoneos', 'iphonesimulator']:
if exists(join(path, target, pkg, 'Podfile')):
print("[iOS] unlinking {}".format(pkg))
self.unlink_ios(path, pkg)
def unlink_android(self, path, pkg):
""" Unlink's the android project to this library.
1. In the app's android/settings.gradle, it removes the following
lines (if they exist):
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir,
'../venv/packages/<project-name>/android')
2. In the app's android/app/build.gradle, it removes the following
line (if present)
compile project(':<project-name>')
3. In the app's
android/app/src/main/java/<bundle/id>/MainApplication.java,
it removes:
import <package>.<Name>Package;
new <Name>Package(),
If no comma exists it will remove the comma from the previous
line.
"""
bundle_id = self.ctx['bundle_id']
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, 'android', pkg))
if not new_packages:
print(Colors.RED+"\t[Android] {} No EnamlPackages found to "
"unlink!".format(pkg)+Colors.RESET)
return
#: Unlink settings.gradle
if Link.is_settings_linked(settings_gradle, pkg):
#: Remove the two statements
new_settings = [
line for line in settings_gradle.split("\n")
if line.strip() not in [
"include ':{name}'".format(name=pkg),
"project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')".format(path=path,
name=pkg)
]
]
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("\t[Android] {} unlinked settings.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"settings.gradle!".format(pkg))
#: Unlink app/build.gradle
if Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = [
line for line in build_gradle.split("\n")
if line.strip() not in [
"compile project(':{name}')".format(name=pkg),
"api project(':{name}')".format(name=pkg),
]
]
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("\t[Android] {} unlinked in "
"app/build.gradle!".format(pkg))
else:
print("\t[Android] {} was not linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if avialable
new_app_java = (new_app_java or
main_application_java.split("\n"))
new_app_java = [
line for line in new_app_java
if line.strip() not in [
"import {};".format(javacls),
"new {}()".format(javacls.split(".")[-1]),
"new {}(),".format(javacls.split(".")[-1]),
]
]
#: Now find the last package and remove the comma if it
#: exists
found = False
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
found = True
elif fnmatch.fnmatch(line.strip(), "new *Package(),"):
j = i
#: We removed the last package so add a comma
if not found:
#: This kills any whitespace...
new_app_java[j] = new_app_java[j][
:new_app_java[j].rfind(',')]
else:
print("\t[Android] {} was not linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.RED+"\t[Android] {} Failed to unlink. "
"Reverting due to error: {}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
|
codelv/enaml-native-cli | enamlnativecli/main.py | Server.run_tornado | python | def run_tornado(self, args):
server = self
import tornado.ioloop
import tornado.web
import tornado.websocket
ioloop = tornado.ioloop.IOLoop.current()
class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
def open(self):
super(DevWebSocketHandler, self).open()
server.on_open(self)
def on_message(self, message):
server.on_message(self, message)
def on_close(self):
super(DevWebSocketHandler, self).on_close()
server.on_close(self)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write(server.index_page)
#: Set the call later method
server.call_later = ioloop.call_later
server.add_callback = ioloop.add_callback
app = tornado.web.Application([
(r"/", MainHandler),
(r"/dev", DevWebSocketHandler),
])
app.listen(self.port)
print("Tornado Dev server started on {}".format(self.port))
ioloop.start() | Tornado dev server implementation | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1466-L1502 | null | class Server(Command):
""" Run a dev server to host files. Only view files can be reloaded at the
moment.
"""
title = set_default("start")
help = set_default("Start a debug server for serving files to the app")
#: Dev server index page to render
index_page = Unicode("enaml-native dev server. "
"When you change a source file it pushes to the app.")
args = set_default([
('--remote-debugging', dict(action='store_true',
help="Run in remote debugging mode")),
])
#: Server port
port = Int(8888)
#: Time in ms to wait before triggering a reload
reload_delay = Float(1)
_reload_count = Int() #: Pending reload requests
#: Watchdog observer
observer = Instance(object)
#: Watchdog handler
watcher = Instance(object)
#: Websocket handler implementation
handlers = List()
#: Callable to add a callback from a thread into the event loop
add_callback = Callable()
#: Callable to add a callback at some later time
call_later = Callable()
#: Changed file events
changes = List()
#: Run in bridge (forwarding) mode for remote debugging
remote_debugging = Bool()
#: Can be run from anywhere
app_dir_required = set_default(False)
def run(self, args=None):
ctx = self.ctx
#: Look for tornado or twisted in reqs
use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))
#: Save setting
self.remote_debugging = args and args.remote_debugging
if self.remote_debugging:
#: Do reverse forwarding so you can use remote-debugging over
#: adb (via USB even if Wifi is not accessible)
shprint(sh.adb, 'reverse',
'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
else:
#: Setup observer
try:
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
except ImportError:
print(Colors.RED + "[WARNING] Watchdog is required the dev "
"server: Run 'pip install watchdog'" + Colors.RESET)
return
self.observer = Observer()
server = self
class AppNotifier(LoggingEventHandler):
def on_any_event(self, event):
super(AppNotifier, self).on_any_event(event)
#: Use add callback to push to event loop thread
server.add_callback(server.on_file_changed, event)
with cd('src'):
if not self.remote_debugging:
print("Watching {}".format(abspath('.')))
self.watcher = AppNotifier()
self.observer.schedule(self.watcher, abspath('.'),
recursive=True)
self.observer.start()
if use_twisted:
self.run_twisted(args)
else:
self.run_tornado(args)
def run_twisted(self, args):
""" Twisted dev server implementation """
server = self
from twisted.internet import reactor
from twisted.web import resource
from twisted.web.static import File
from twisted.web.server import Site
from autobahn.twisted.websocket import (WebSocketServerFactory,
WebSocketServerProtocol)
from autobahn.twisted.resource import WebSocketResource
class DevWebSocketHandler(WebSocketServerProtocol):
def onConnect(self, request):
super(DevWebSocketHandler, self).onConnect(request)
server.on_open(self)
def onMessage(self, payload, isBinary):
server.on_message(self, payload)
def onClose(self, wasClean, code, reason):
super(DevWebSocketHandler,self).onClose(wasClean, code, reason)
server.on_close(self)
def write_message(self, message, binary=False):
self.sendMessage(message, binary)
#: Set the call later method
server.call_later = reactor.callLater
server.add_callback = reactor.callFromThread
factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
factory.protocol = DevWebSocketHandler
class MainHandler(resource.Resource):
def render_GET(self, req):
return str(server.index_page)
root = resource.Resource()
root.putChild("", MainHandler())
root.putChild("dev", WebSocketResource(factory))
reactor.listenTCP(self.port, Site(root))
print("Twisted Dev server started on {}".format(self.port))
reactor.run()
#: ========================================================
#: Shared protocol implementation
#: ========================================================
def on_open(self, handler):
self._reload_count = 0
print("Client {} connected!".format(handler))
self.handlers.append(handler)
def on_message(self, handler, msg):
""" In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
"""
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True)
else:
print(msg)
def send_message(self, msg):
""" Send a message to the client. This should not be used in
remote debugging mode.
"""
if not self.handlers:
return #: Client not connected
for h in self.handlers:
h.write_message(msg)
def on_close(self, handler):
print("Client {} left!".format(handler))
self.handlers.remove(handler)
def on_file_changed(self, event):
""" """
print(event)
self._reload_count +=1
self.changes.append(event)
self.call_later(self.reload_delay, self._trigger_reload, event)
def _trigger_reload(self, event):
self._reload_count -=1
if self._reload_count == 0:
files = {}
for event in self.changes:
path = os.path.relpath(event.src_path, os.getcwd())
if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
continue
with open(event.src_path) as f:
data = f.read()
#: Add to changed files
files[path] = data
if files:
#: Send the reload request
msg = {
'type':'reload',
'files':files
}
print("Reloading: {}".format(files.keys()))
self.send_message(json.dumps(msg))
#: Clear changes
self.changes = []
|
codelv/enaml-native-cli | enamlnativecli/main.py | Server.run_twisted | python | def run_twisted(self, args):
server = self
from twisted.internet import reactor
from twisted.web import resource
from twisted.web.static import File
from twisted.web.server import Site
from autobahn.twisted.websocket import (WebSocketServerFactory,
WebSocketServerProtocol)
from autobahn.twisted.resource import WebSocketResource
class DevWebSocketHandler(WebSocketServerProtocol):
def onConnect(self, request):
super(DevWebSocketHandler, self).onConnect(request)
server.on_open(self)
def onMessage(self, payload, isBinary):
server.on_message(self, payload)
def onClose(self, wasClean, code, reason):
super(DevWebSocketHandler,self).onClose(wasClean, code, reason)
server.on_close(self)
def write_message(self, message, binary=False):
self.sendMessage(message, binary)
#: Set the call later method
server.call_later = reactor.callLater
server.add_callback = reactor.callFromThread
factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
factory.protocol = DevWebSocketHandler
class MainHandler(resource.Resource):
def render_GET(self, req):
return str(server.index_page)
root = resource.Resource()
root.putChild("", MainHandler())
root.putChild("dev", WebSocketResource(factory))
reactor.listenTCP(self.port, Site(root))
print("Twisted Dev server started on {}".format(self.port))
reactor.run() | Twisted dev server implementation | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1504-L1547 | null | class Server(Command):
""" Run a dev server to host files. Only view files can be reloaded at the
moment.
"""
title = set_default("start")
help = set_default("Start a debug server for serving files to the app")
#: Dev server index page to render
index_page = Unicode("enaml-native dev server. "
"When you change a source file it pushes to the app.")
args = set_default([
('--remote-debugging', dict(action='store_true',
help="Run in remote debugging mode")),
])
#: Server port
port = Int(8888)
#: Time in ms to wait before triggering a reload
reload_delay = Float(1)
_reload_count = Int() #: Pending reload requests
#: Watchdog observer
observer = Instance(object)
#: Watchdog handler
watcher = Instance(object)
#: Websocket handler implementation
handlers = List()
#: Callable to add a callback from a thread into the event loop
add_callback = Callable()
#: Callable to add a callback at some later time
call_later = Callable()
#: Changed file events
changes = List()
#: Run in bridge (forwarding) mode for remote debugging
remote_debugging = Bool()
#: Can be run from anywhere
app_dir_required = set_default(False)
def run(self, args=None):
ctx = self.ctx
#: Look for tornado or twisted in reqs
use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))
#: Save setting
self.remote_debugging = args and args.remote_debugging
if self.remote_debugging:
#: Do reverse forwarding so you can use remote-debugging over
#: adb (via USB even if Wifi is not accessible)
shprint(sh.adb, 'reverse',
'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
else:
#: Setup observer
try:
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
except ImportError:
print(Colors.RED + "[WARNING] Watchdog is required the dev "
"server: Run 'pip install watchdog'" + Colors.RESET)
return
self.observer = Observer()
server = self
class AppNotifier(LoggingEventHandler):
def on_any_event(self, event):
super(AppNotifier, self).on_any_event(event)
#: Use add callback to push to event loop thread
server.add_callback(server.on_file_changed, event)
with cd('src'):
if not self.remote_debugging:
print("Watching {}".format(abspath('.')))
self.watcher = AppNotifier()
self.observer.schedule(self.watcher, abspath('.'),
recursive=True)
self.observer.start()
if use_twisted:
self.run_twisted(args)
else:
self.run_tornado(args)
def run_tornado(self, args):
""" Tornado dev server implementation """
server = self
import tornado.ioloop
import tornado.web
import tornado.websocket
ioloop = tornado.ioloop.IOLoop.current()
class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
def open(self):
super(DevWebSocketHandler, self).open()
server.on_open(self)
def on_message(self, message):
server.on_message(self, message)
def on_close(self):
super(DevWebSocketHandler, self).on_close()
server.on_close(self)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write(server.index_page)
#: Set the call later method
server.call_later = ioloop.call_later
server.add_callback = ioloop.add_callback
app = tornado.web.Application([
(r"/", MainHandler),
(r"/dev", DevWebSocketHandler),
])
app.listen(self.port)
print("Tornado Dev server started on {}".format(self.port))
ioloop.start()
#: ========================================================
#: Shared protocol implementation
#: ========================================================
def on_open(self, handler):
self._reload_count = 0
print("Client {} connected!".format(handler))
self.handlers.append(handler)
def on_message(self, handler, msg):
""" In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
"""
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True)
else:
print(msg)
def send_message(self, msg):
""" Send a message to the client. This should not be used in
remote debugging mode.
"""
if not self.handlers:
return #: Client not connected
for h in self.handlers:
h.write_message(msg)
def on_close(self, handler):
print("Client {} left!".format(handler))
self.handlers.remove(handler)
def on_file_changed(self, event):
""" """
print(event)
self._reload_count +=1
self.changes.append(event)
self.call_later(self.reload_delay, self._trigger_reload, event)
def _trigger_reload(self, event):
self._reload_count -=1
if self._reload_count == 0:
files = {}
for event in self.changes:
path = os.path.relpath(event.src_path, os.getcwd())
if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
continue
with open(event.src_path) as f:
data = f.read()
#: Add to changed files
files[path] = data
if files:
#: Send the reload request
msg = {
'type':'reload',
'files':files
}
print("Reloading: {}".format(files.keys()))
self.send_message(json.dumps(msg))
#: Clear changes
self.changes = []
|
codelv/enaml-native-cli | enamlnativecli/main.py | Server.on_message | python | def on_message(self, handler, msg):
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True)
else:
print(msg) | In remote debugging mode this simply acts as a forwarding
proxy for the two clients. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1557-L1567 | null | class Server(Command):
""" Run a dev server to host files. Only view files can be reloaded at the
moment.
"""
title = set_default("start")
help = set_default("Start a debug server for serving files to the app")
#: Dev server index page to render
index_page = Unicode("enaml-native dev server. "
"When you change a source file it pushes to the app.")
args = set_default([
('--remote-debugging', dict(action='store_true',
help="Run in remote debugging mode")),
])
#: Server port
port = Int(8888)
#: Time in ms to wait before triggering a reload
reload_delay = Float(1)
_reload_count = Int() #: Pending reload requests
#: Watchdog observer
observer = Instance(object)
#: Watchdog handler
watcher = Instance(object)
#: Websocket handler implementation
handlers = List()
#: Callable to add a callback from a thread into the event loop
add_callback = Callable()
#: Callable to add a callback at some later time
call_later = Callable()
#: Changed file events
changes = List()
#: Run in bridge (forwarding) mode for remote debugging
remote_debugging = Bool()
#: Can be run from anywhere
app_dir_required = set_default(False)
def run(self, args=None):
ctx = self.ctx
#: Look for tornado or twisted in reqs
use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))
#: Save setting
self.remote_debugging = args and args.remote_debugging
if self.remote_debugging:
#: Do reverse forwarding so you can use remote-debugging over
#: adb (via USB even if Wifi is not accessible)
shprint(sh.adb, 'reverse',
'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
else:
#: Setup observer
try:
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
except ImportError:
print(Colors.RED + "[WARNING] Watchdog is required the dev "
"server: Run 'pip install watchdog'" + Colors.RESET)
return
self.observer = Observer()
server = self
class AppNotifier(LoggingEventHandler):
def on_any_event(self, event):
super(AppNotifier, self).on_any_event(event)
#: Use add callback to push to event loop thread
server.add_callback(server.on_file_changed, event)
with cd('src'):
if not self.remote_debugging:
print("Watching {}".format(abspath('.')))
self.watcher = AppNotifier()
self.observer.schedule(self.watcher, abspath('.'),
recursive=True)
self.observer.start()
if use_twisted:
self.run_twisted(args)
else:
self.run_tornado(args)
def run_tornado(self, args):
""" Tornado dev server implementation """
server = self
import tornado.ioloop
import tornado.web
import tornado.websocket
ioloop = tornado.ioloop.IOLoop.current()
class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
def open(self):
super(DevWebSocketHandler, self).open()
server.on_open(self)
def on_message(self, message):
server.on_message(self, message)
def on_close(self):
super(DevWebSocketHandler, self).on_close()
server.on_close(self)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write(server.index_page)
#: Set the call later method
server.call_later = ioloop.call_later
server.add_callback = ioloop.add_callback
app = tornado.web.Application([
(r"/", MainHandler),
(r"/dev", DevWebSocketHandler),
])
app.listen(self.port)
print("Tornado Dev server started on {}".format(self.port))
ioloop.start()
def run_twisted(self, args):
""" Twisted dev server implementation """
server = self
from twisted.internet import reactor
from twisted.web import resource
from twisted.web.static import File
from twisted.web.server import Site
from autobahn.twisted.websocket import (WebSocketServerFactory,
WebSocketServerProtocol)
from autobahn.twisted.resource import WebSocketResource
class DevWebSocketHandler(WebSocketServerProtocol):
def onConnect(self, request):
super(DevWebSocketHandler, self).onConnect(request)
server.on_open(self)
def onMessage(self, payload, isBinary):
server.on_message(self, payload)
def onClose(self, wasClean, code, reason):
super(DevWebSocketHandler,self).onClose(wasClean, code, reason)
server.on_close(self)
def write_message(self, message, binary=False):
self.sendMessage(message, binary)
#: Set the call later method
server.call_later = reactor.callLater
server.add_callback = reactor.callFromThread
factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
factory.protocol = DevWebSocketHandler
class MainHandler(resource.Resource):
def render_GET(self, req):
return str(server.index_page)
root = resource.Resource()
root.putChild("", MainHandler())
root.putChild("dev", WebSocketResource(factory))
reactor.listenTCP(self.port, Site(root))
print("Twisted Dev server started on {}".format(self.port))
reactor.run()
#: ========================================================
#: Shared protocol implementation
#: ========================================================
def on_open(self, handler):
self._reload_count = 0
print("Client {} connected!".format(handler))
self.handlers.append(handler)
def send_message(self, msg):
""" Send a message to the client. This should not be used in
remote debugging mode.
"""
if not self.handlers:
return #: Client not connected
for h in self.handlers:
h.write_message(msg)
def on_close(self, handler):
print("Client {} left!".format(handler))
self.handlers.remove(handler)
def on_file_changed(self, event):
""" """
print(event)
self._reload_count +=1
self.changes.append(event)
self.call_later(self.reload_delay, self._trigger_reload, event)
def _trigger_reload(self, event):
self._reload_count -=1
if self._reload_count == 0:
files = {}
for event in self.changes:
path = os.path.relpath(event.src_path, os.getcwd())
if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
continue
with open(event.src_path) as f:
data = f.read()
#: Add to changed files
files[path] = data
if files:
#: Send the reload request
msg = {
'type':'reload',
'files':files
}
print("Reloading: {}".format(files.keys()))
self.send_message(json.dumps(msg))
#: Clear changes
self.changes = []
|
codelv/enaml-native-cli | enamlnativecli/main.py | Server.send_message | python | def send_message(self, msg):
if not self.handlers:
return #: Client not connected
for h in self.handlers:
h.write_message(msg) | Send a message to the client. This should not be used in
remote debugging mode. | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1569-L1577 | null | class Server(Command):
""" Run a dev server to host files. Only view files can be reloaded at the
moment.
"""
title = set_default("start")
help = set_default("Start a debug server for serving files to the app")
#: Dev server index page to render
index_page = Unicode("enaml-native dev server. "
"When you change a source file it pushes to the app.")
args = set_default([
('--remote-debugging', dict(action='store_true',
help="Run in remote debugging mode")),
])
#: Server port
port = Int(8888)
#: Time in ms to wait before triggering a reload
reload_delay = Float(1)
_reload_count = Int() #: Pending reload requests
#: Watchdog observer
observer = Instance(object)
#: Watchdog handler
watcher = Instance(object)
#: Websocket handler implementation
handlers = List()
#: Callable to add a callback from a thread into the event loop
add_callback = Callable()
#: Callable to add a callback at some later time
call_later = Callable()
#: Changed file events
changes = List()
#: Run in bridge (forwarding) mode for remote debugging
remote_debugging = Bool()
#: Can be run from anywhere
app_dir_required = set_default(False)
def run(self, args=None):
ctx = self.ctx
#: Look for tornado or twisted in reqs
use_twisted = 'twisted' in ', '.join(ctx.get('dependencies', []))
#: Save setting
self.remote_debugging = args and args.remote_debugging
if self.remote_debugging:
#: Do reverse forwarding so you can use remote-debugging over
#: adb (via USB even if Wifi is not accessible)
shprint(sh.adb, 'reverse',
'tcp:{}'.format(self.port), 'tcp:{}'.format(self.port))
else:
#: Setup observer
try:
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
except ImportError:
print(Colors.RED + "[WARNING] Watchdog is required the dev "
"server: Run 'pip install watchdog'" + Colors.RESET)
return
self.observer = Observer()
server = self
class AppNotifier(LoggingEventHandler):
def on_any_event(self, event):
super(AppNotifier, self).on_any_event(event)
#: Use add callback to push to event loop thread
server.add_callback(server.on_file_changed, event)
with cd('src'):
if not self.remote_debugging:
print("Watching {}".format(abspath('.')))
self.watcher = AppNotifier()
self.observer.schedule(self.watcher, abspath('.'),
recursive=True)
self.observer.start()
if use_twisted:
self.run_twisted(args)
else:
self.run_tornado(args)
def run_tornado(self, args):
""" Tornado dev server implementation """
server = self
import tornado.ioloop
import tornado.web
import tornado.websocket
ioloop = tornado.ioloop.IOLoop.current()
class DevWebSocketHandler(tornado.websocket.WebSocketHandler):
def open(self):
super(DevWebSocketHandler, self).open()
server.on_open(self)
def on_message(self, message):
server.on_message(self, message)
def on_close(self):
super(DevWebSocketHandler, self).on_close()
server.on_close(self)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write(server.index_page)
#: Set the call later method
server.call_later = ioloop.call_later
server.add_callback = ioloop.add_callback
app = tornado.web.Application([
(r"/", MainHandler),
(r"/dev", DevWebSocketHandler),
])
app.listen(self.port)
print("Tornado Dev server started on {}".format(self.port))
ioloop.start()
def run_twisted(self, args):
""" Twisted dev server implementation """
server = self
from twisted.internet import reactor
from twisted.web import resource
from twisted.web.static import File
from twisted.web.server import Site
from autobahn.twisted.websocket import (WebSocketServerFactory,
WebSocketServerProtocol)
from autobahn.twisted.resource import WebSocketResource
class DevWebSocketHandler(WebSocketServerProtocol):
def onConnect(self, request):
super(DevWebSocketHandler, self).onConnect(request)
server.on_open(self)
def onMessage(self, payload, isBinary):
server.on_message(self, payload)
def onClose(self, wasClean, code, reason):
super(DevWebSocketHandler,self).onClose(wasClean, code, reason)
server.on_close(self)
def write_message(self, message, binary=False):
self.sendMessage(message, binary)
#: Set the call later method
server.call_later = reactor.callLater
server.add_callback = reactor.callFromThread
factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port))
factory.protocol = DevWebSocketHandler
class MainHandler(resource.Resource):
def render_GET(self, req):
return str(server.index_page)
root = resource.Resource()
root.putChild("", MainHandler())
root.putChild("dev", WebSocketResource(factory))
reactor.listenTCP(self.port, Site(root))
print("Twisted Dev server started on {}".format(self.port))
reactor.run()
#: ========================================================
#: Shared protocol implementation
#: ========================================================
def on_open(self, handler):
self._reload_count = 0
print("Client {} connected!".format(handler))
self.handlers.append(handler)
def on_message(self, handler, msg):
""" In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
"""
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True)
else:
print(msg)
def send_message(self, msg):
""" Send a message to the client. This should not be used in
remote debugging mode.
"""
if not self.handlers:
return #: Client not connected
for h in self.handlers:
h.write_message(msg)
def on_close(self, handler):
print("Client {} left!".format(handler))
self.handlers.remove(handler)
def on_file_changed(self, event):
""" """
print(event)
self._reload_count +=1
self.changes.append(event)
self.call_later(self.reload_delay, self._trigger_reload, event)
def _trigger_reload(self, event):
self._reload_count -=1
if self._reload_count == 0:
files = {}
for event in self.changes:
path = os.path.relpath(event.src_path, os.getcwd())
if os.path.splitext(path)[-1] not in ['.py', '.enaml']:
continue
with open(event.src_path) as f:
data = f.read()
#: Add to changed files
files[path] = data
if files:
#: Send the reload request
msg = {
'type':'reload',
'files':files
}
print("Reloading: {}".format(files.keys()))
self.send_message(json.dumps(msg))
#: Clear changes
self.changes = []
|
codelv/enaml-native-cli | enamlnativecli/main.py | EnamlNativeCli._default_commands | python | def _default_commands(self):
commands = [c() for c in find_commands(Command)]
#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(
group="enaml_native_command"):
c = ep.load()
if not issubclass(c, Command):
print("Warning: entry point {} did not return a valid enaml "
"cli command! This command will be ignored!".format(
ep.name))
commands.append(c())
return commands | Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1648-L1668 | null | class EnamlNativeCli(Atom):
#: Root parser
parser = Instance(ArgumentParser)
#: Loaded from package
ctx = Dict()
#: Parsed args
args = Instance(Namespace)
#: Location of package file
package = Unicode("environment.yml")
#: If enaml-native is being run within an app directory
in_app_directory = Bool()
#: Conda command
conda = Instance(sh.Command)
#: Commands
commands = List(Command)
def _default_in_app_directory(self):
""" Return if we are in a directory that contains the package.json file
which should indicate it's in the root directory of an enaml-native
app.
"""
return exists(self.package)
def _default_ctx(self):
""" Return the package config or context and normalize some of the
values
"""
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx
def _default_parser(self):
""" Generate a parser using the command list """
parser = ArgumentParser(prog='enaml-native')
#: Build commands by name
cmds = {c.title: c for c in self.commands}
#: Build parser, prepare commands
subparsers = parser.add_subparsers()
for c in self.commands:
p = subparsers.add_parser(c.title, help=c.help)
c.parser = p
for (flags, kwargs) in c.args:
p.add_argument(*flags.split(), **kwargs)
p.set_defaults(cmd=c)
c.ctx = self.ctx
c.cmds = cmds
c.cli = self
return parser
def _default_conda(self):
return find_conda()
def check_dependencies(self):
try:
self.conda('--version')
except:
raise EnvironmentError(
"conda could not be found. Please install miniconda from "
"https://conda.io/miniconda.html or set CONDA_HOME to the"
"location where conda is installed.")
def start(self):
""" Run the commands"""
self.check_dependencies()
self.args = self.parser.parse_args()
# Python 3 doesn't set the cmd if no args are given
if not hasattr(self.args, 'cmd'):
self.parser.print_help()
return
cmd = self.args.cmd
try:
if cmd.app_dir_required and not self.in_app_directory:
raise EnvironmentError(
"'enaml-native {}' must be run within an app root "
"directory not: {}".format(cmd.title, os.getcwd()))
cmd.run(self.args)
except sh.ErrorReturnCode as e:
raise
|
codelv/enaml-native-cli | enamlnativecli/main.py | EnamlNativeCli._default_ctx | python | def _default_ctx(self):
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx | Return the package config or context and normalize some of the
values | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1678-L1706 | null | class EnamlNativeCli(Atom):
#: Root parser
parser = Instance(ArgumentParser)
#: Loaded from package
ctx = Dict()
#: Parsed args
args = Instance(Namespace)
#: Location of package file
package = Unicode("environment.yml")
#: If enaml-native is being run within an app directory
in_app_directory = Bool()
#: Conda command
conda = Instance(sh.Command)
#: Commands
commands = List(Command)
def _default_commands(self):
""" Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass
"""
commands = [c() for c in find_commands(Command)]
#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(
group="enaml_native_command"):
c = ep.load()
if not issubclass(c, Command):
print("Warning: entry point {} did not return a valid enaml "
"cli command! This command will be ignored!".format(
ep.name))
commands.append(c())
return commands
def _default_in_app_directory(self):
""" Return if we are in a directory that contains the package.json file
which should indicate it's in the root directory of an enaml-native
app.
"""
return exists(self.package)
def _default_ctx(self):
""" Return the package config or context and normalize some of the
values
"""
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx
def _default_parser(self):
""" Generate a parser using the command list """
parser = ArgumentParser(prog='enaml-native')
#: Build commands by name
cmds = {c.title: c for c in self.commands}
#: Build parser, prepare commands
subparsers = parser.add_subparsers()
for c in self.commands:
p = subparsers.add_parser(c.title, help=c.help)
c.parser = p
for (flags, kwargs) in c.args:
p.add_argument(*flags.split(), **kwargs)
p.set_defaults(cmd=c)
c.ctx = self.ctx
c.cmds = cmds
c.cli = self
return parser
def _default_conda(self):
return find_conda()
def check_dependencies(self):
    """Verify that the conda executable is available.

    Raises
    ------
    EnvironmentError
        If running ``conda --version`` fails.
    """
    try:
        self.conda('--version')
    # Bug fix: was a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        raise EnvironmentError(
            "conda could not be found. Please install miniconda from "
            # Bug fix: a space was missing between the two string
            # fragments, producing "thelocation" in the message.
            "https://conda.io/miniconda.html or set CONDA_HOME to the "
            "location where conda is installed.")
def start(self):
""" Run the commands"""
self.check_dependencies()
self.args = self.parser.parse_args()
# Python 3 doesn't set the cmd if no args are given
if not hasattr(self.args, 'cmd'):
self.parser.print_help()
return
cmd = self.args.cmd
try:
if cmd.app_dir_required and not self.in_app_directory:
raise EnvironmentError(
"'enaml-native {}' must be run within an app root "
"directory not: {}".format(cmd.title, os.getcwd()))
cmd.run(self.args)
except sh.ErrorReturnCode as e:
raise
|
def _default_parser(self):
    """Generate a parser using the command list."""
    parser = ArgumentParser(prog='enaml-native')

    # Map each command title to its instance so commands can look one
    # another up by name.
    commands_by_name = {command.title: command for command in self.commands}

    # Create one subparser per command and wire up its arguments.
    subparsers = parser.add_subparsers()
    for command in self.commands:
        subparser = subparsers.add_parser(command.title, help=command.help)
        command.parser = subparser
        for flags, kwargs in command.args:
            subparser.add_argument(*flags.split(), **kwargs)
        subparser.set_defaults(cmd=command)
        command.ctx = self.ctx
        command.cmds = commands_by_name
        command.cli = self
    return parser
#: Root parser
parser = Instance(ArgumentParser)
#: Loaded from package
ctx = Dict()
#: Parsed args
args = Instance(Namespace)
#: Location of package file
package = Unicode("environment.yml")
#: If enaml-native is being run within an app directory
in_app_directory = Bool()
#: Conda command
conda = Instance(sh.Command)
#: Commands
commands = List(Command)
def _default_commands(self):
""" Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass
"""
commands = [c() for c in find_commands(Command)]
#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(
group="enaml_native_command"):
c = ep.load()
if not issubclass(c, Command):
print("Warning: entry point {} did not return a valid enaml "
"cli command! This command will be ignored!".format(
ep.name))
commands.append(c())
return commands
def _default_in_app_directory(self):
""" Return if we are in a directory that contains the package.json file
which should indicate it's in the root directory of an enaml-native
app.
"""
return exists(self.package)
def _default_ctx(self):
""" Return the package config or context and normalize some of the
values
"""
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx
def _default_conda(self):
return find_conda()
def check_dependencies(self):
try:
self.conda('--version')
except:
raise EnvironmentError(
"conda could not be found. Please install miniconda from "
"https://conda.io/miniconda.html or set CONDA_HOME to the"
"location where conda is installed.")
def start(self):
""" Run the commands"""
self.check_dependencies()
self.args = self.parser.parse_args()
# Python 3 doesn't set the cmd if no args are given
if not hasattr(self.args, 'cmd'):
self.parser.print_help()
return
cmd = self.args.cmd
try:
if cmd.app_dir_required and not self.in_app_directory:
raise EnvironmentError(
"'enaml-native {}' must be run within an app root "
"directory not: {}".format(cmd.title, os.getcwd()))
cmd.run(self.args)
except sh.ErrorReturnCode as e:
raise
|
def start(self):
    """Run the command selected on the command line.

    Parses ``sys.argv`` with the generated parser, prints help when no
    sub-command was given, and otherwise dispatches to the command.

    Raises
    ------
    EnvironmentError
        If the command requires an app root directory and the current
        working directory is not one.
    """
    self.check_dependencies()
    self.args = self.parser.parse_args()

    # Python 3 doesn't set the cmd if no args are given
    if not hasattr(self.args, 'cmd'):
        self.parser.print_help()
        return

    cmd = self.args.cmd
    if cmd.app_dir_required and not self.in_app_directory:
        raise EnvironmentError(
            "'enaml-native {}' must be run within an app root "
            "directory not: {}".format(cmd.title, os.getcwd()))
    # The previous ``try/except sh.ErrorReturnCode as e: raise`` wrapper
    # only re-raised the exception unchanged (and never used ``e``), so
    # it was dead code and has been removed.
    cmd.run(self.args)
#: Root parser
parser = Instance(ArgumentParser)
#: Loaded from package
ctx = Dict()
#: Parsed args
args = Instance(Namespace)
#: Location of package file
package = Unicode("environment.yml")
#: If enaml-native is being run within an app directory
in_app_directory = Bool()
#: Conda command
conda = Instance(sh.Command)
#: Commands
commands = List(Command)
def _default_commands(self):
""" Build the list of CLI commands by finding subclasses of the Command
class
Also allows commands to be installed using the "enaml_native_command"
entry point. This entry point should return a Command subclass
"""
commands = [c() for c in find_commands(Command)]
#: Get commands installed via entry points
for ep in pkg_resources.iter_entry_points(
group="enaml_native_command"):
c = ep.load()
if not issubclass(c, Command):
print("Warning: entry point {} did not return a valid enaml "
"cli command! This command will be ignored!".format(
ep.name))
commands.append(c())
return commands
def _default_in_app_directory(self):
""" Return if we are in a directory that contains the package.json file
which should indicate it's in the root directory of an enaml-native
app.
"""
return exists(self.package)
def _default_ctx(self):
""" Return the package config or context and normalize some of the
values
"""
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx
def _default_parser(self):
""" Generate a parser using the command list """
parser = ArgumentParser(prog='enaml-native')
#: Build commands by name
cmds = {c.title: c for c in self.commands}
#: Build parser, prepare commands
subparsers = parser.add_subparsers()
for c in self.commands:
p = subparsers.add_parser(c.title, help=c.help)
c.parser = p
for (flags, kwargs) in c.args:
p.add_argument(*flags.split(), **kwargs)
p.set_defaults(cmd=c)
c.ctx = self.ctx
c.cmds = cmds
c.cli = self
return parser
def _default_conda(self):
return find_conda()
def check_dependencies(self):
try:
self.conda('--version')
except:
raise EnvironmentError(
"conda could not be found. Please install miniconda from "
"https://conda.io/miniconda.html or set CONDA_HOME to the"
"location where conda is installed.")
|
def find_data(folder):
    """Include everything in the folder.

    Walks *folder* recursively and yields each file path prefixed with
    ``..`` (the form setuptools expects for out-of-package data files).
    """
    for path, _dirs, filenames in os.walk(folder):
        for name in filenames:
            yield os.path.join('..', path, name)
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the GPLv3 License.
The full license is in the file COPYING.txt, distributed with this software.
Created on July 10, 2017
@author: jrm
"""
import os
from setuptools import setup, find_packages


setup(
    name="enaml-native-cli",
    version="2.2.18",
    author="CodeLV",
    author_email="frmdstryr@gmail.com",
    license='GPLv3',
    url='https://github.com/codelv/enaml-native-cli/',
    description="Build native mobile apps in python",
    entry_points={'console_scripts': [
        'enaml-native = enamlnativecli.main:main']},
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    packages=find_packages(),
    # Bug fix: setuptools expects a concrete list here; ``find_data``
    # is a generator.
    package_data={'': list(find_data('enamlnativecli'))},
    include_package_data=True,
    install_requires=['sh', 'atom', 'ruamel.yaml', 'cookiecutter', 'pbs'],
    # Bug fix: the setuptools keyword is ``tests_require``;
    # ``test_requires`` was silently ignored.
    tests_require=['requests', 'py.test', 'pytest-cov', 'pytest-timeout'],
)
|
async def get_image(
    self,
    input_source: str,
    output_format: str = IMAGE_JPEG,
    extra_cmd: Optional[str] = None,
    timeout: int = 15,
) -> Optional[bytes]:
    """Open FFmpeg process as capture 1 frame.

    Returns the encoded image bytes, or None when FFmpeg fails to start
    or does not produce a frame within *timeout* seconds.
    """
    capture_cmd = ["-an", "-frames:v", "1", "-c:v", output_format]

    # Open the input and ask ffmpeg for exactly one encoded frame.
    opened = await self.open(
        cmd=capture_cmd,
        input_source=input_source,
        output="-f image2pipe -",
        extra_cmd=extra_cmd,
    )
    if not opened:
        _LOGGER.warning("Error starting FFmpeg.")
        return None

    # Collect the frame from stdout, guarding against a hung process.
    try:
        communicate = functools.partial(
            self._proc.communicate, timeout=timeout)
        image, _ = await self._loop.run_in_executor(None, communicate)
        return image
    except (subprocess.TimeoutExpired, ValueError):
        _LOGGER.warning("Timeout reading image.")
        self.kill()
        return None
"async def open(\n self,\n cmd: List[str],\n input_source: Optional[str],\n output: Optional[str] = \"-\",\n extra_cmd: Optional[str] = None,\n stdout_pipe: bool = True,\n stderr_pipe: bool = False,\n) -> bool:\n \"\"\"Start a ffmpeg instance and pipe output.\"\"\"\n stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL\n stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL\n\n if self.is_running:\n _LOGGER.warning(\"FFmpeg is already running!\")\n return True\n\n # set command line\n self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)\n\n # start ffmpeg\n _LOGGER.debug(\"Start FFmpeg with %s\", str(self._argv))\n try:\n proc_func = functools.partial(\n subprocess.Popen,\n self._argv,\n bufsize=0,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=stderr,\n )\n self._proc = await self._loop.run_in_executor(None, proc_func)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"FFmpeg fails %s\", err)\n self._clear()\n return False\n\n return self._proc is not None\n"
] | class ImageFrame(HAFFmpeg):
"""Implement a single image capture from a stream."""
|
async def get_version(self, timeout: int = 15) -> Optional[str]:
    """Execute FFmpeg process and parse the version information.

    Return full FFmpeg version string. Such as 3.4.2-tessus
    """
    # Run ffmpeg with only the -version flag; no input, no output file.
    is_open = await self.open(cmd=["-version"], input_source=None, output="")
    if not is_open:
        _LOGGER.warning("Error starting FFmpeg.")
        return

    # Read stdout and pull the version token out of the banner line.
    try:
        communicate = functools.partial(
            self._proc.communicate, timeout=timeout)
        output, _ = await self._loop.run_in_executor(None, communicate)
        match = re.search(r"ffmpeg version (\S*)", output.decode())
        if match is not None:
            return match.group(1)
    except (subprocess.TimeoutExpired, ValueError):
        _LOGGER.warning("Timeout reading stdout.")
        self.kill()
    return None
"async def open(\n self,\n cmd: List[str],\n input_source: Optional[str],\n output: Optional[str] = \"-\",\n extra_cmd: Optional[str] = None,\n stdout_pipe: bool = True,\n stderr_pipe: bool = False,\n) -> bool:\n \"\"\"Start a ffmpeg instance and pipe output.\"\"\"\n stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL\n stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL\n\n if self.is_running:\n _LOGGER.warning(\"FFmpeg is already running!\")\n return True\n\n # set command line\n self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)\n\n # start ffmpeg\n _LOGGER.debug(\"Start FFmpeg with %s\", str(self._argv))\n try:\n proc_func = functools.partial(\n subprocess.Popen,\n self._argv,\n bufsize=0,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=stderr,\n )\n self._proc = await self._loop.run_in_executor(None, proc_func)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"FFmpeg fails %s\", err)\n self._clear()\n return False\n\n return self._proc is not None\n"
] | class FFVersion(HAFFmpeg):
"""Retrieve FFmpeg version information."""
|
def open_camera(
    self, input_source: str, extra_cmd: Optional[str] = None
) -> Coroutine:
    """Open FFmpeg process as mjpeg video stream.

    Return A coroutine.
    """
    # Drop audio and transcode the video track to MJPEG.
    return self.open(
        cmd=["-an", "-c:v", "mjpeg"],
        input_source=input_source,
        output="-f mpjpeg -",
        extra_cmd=extra_cmd,
    )
"async def open(\n self,\n cmd: List[str],\n input_source: Optional[str],\n output: Optional[str] = \"-\",\n extra_cmd: Optional[str] = None,\n stdout_pipe: bool = True,\n stderr_pipe: bool = False,\n) -> bool:\n \"\"\"Start a ffmpeg instance and pipe output.\"\"\"\n stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL\n stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL\n\n if self.is_running:\n _LOGGER.warning(\"FFmpeg is already running!\")\n return True\n\n # set command line\n self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)\n\n # start ffmpeg\n _LOGGER.debug(\"Start FFmpeg with %s\", str(self._argv))\n try:\n proc_func = functools.partial(\n subprocess.Popen,\n self._argv,\n bufsize=0,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=stderr,\n )\n self._proc = await self._loop.run_in_executor(None, proc_func)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"FFmpeg fails %s\", err)\n self._clear()\n return False\n\n return self._proc is not None\n"
] | class CameraMjpeg(HAFFmpeg):
"""Implement a camera they convert video stream to MJPEG."""
|
def is_running(self) -> bool:
    """Return True if ffmpeg is running."""
    # Running means a process exists and has not yet set a return code.
    proc = self._proc
    return proc is not None and proc.returncode is None
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
    """Base initialize.

    :param ffmpeg_bin: path to the ffmpeg executable
    :param loop: event loop used to run blocking work in executors
    """
    self._ffmpeg = ffmpeg_bin
    self._loop = loop
    # No command line built and no process spawned yet.
    self._argv = None
    self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
    self,
    cmd: List[str],
    input_source: Optional[str],
    output: Optional[str] = "-",
    extra_cmd: Optional[str] = None,
    stdout_pipe: bool = True,
    stderr_pipe: bool = False,
) -> bool:
    """Start a ffmpeg instance and pipe output.

    Returns True when a process is running afterwards, False when
    spawning failed.
    """
    if self.is_running:
        _LOGGER.warning("FFmpeg is already running!")
        return True

    stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
    stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL

    # Build the full argv for this invocation.
    self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)

    _LOGGER.debug("Start FFmpeg with %s", str(self._argv))
    try:
        spawn = functools.partial(
            subprocess.Popen,
            self._argv,
            bufsize=0,
            stdin=subprocess.PIPE,
            stdout=stdout,
            stderr=stderr,
        )
        # Popen blocks, so run it in the default executor.
        self._proc = await self._loop.run_in_executor(None, spawn)
    except Exception as err:  # pylint: disable=broad-except
        _LOGGER.exception("FFmpeg fails %s", err)
        self._clear()
        return False

    return self._proc is not None
async def close(self, timeout=5) -> None:
    """Stop a ffmpeg instance.

    Sends ``q`` on stdin and waits up to *timeout* seconds; on timeout
    the process is killed instead.
    """
    if not self.is_running:
        _LOGGER.warning("FFmpeg isn't running!")
        return

    # Can't use communicate because we attach the output to a streamreader
    def _shutdown():
        """Ask ffmpeg to quit and wait for it to exit."""
        self._proc.stdin.write(b"q")
        self._proc.wait(timeout=timeout)

    try:
        await self._loop.run_in_executor(None, _shutdown)
        _LOGGER.debug("Close FFmpeg process")
    except (subprocess.TimeoutExpired, ValueError):
        _LOGGER.warning("Timeout while waiting of FFmpeg")
        self.kill()
    finally:
        self._clear()
def kill(self) -> None:
    """Kill ffmpeg job."""
    self._proc.kill()
    # Reap the dead process in the executor; the result is not awaited.
    self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
    """Create and return streamreader."""
    reader = asyncio.StreamReader(loop=self._loop)
    protocol = asyncio.StreamReaderProtocol(reader)

    # Attach the requested pipe of the ffmpeg process to the reader.
    pipe = self._proc.stdout if source == FFMPEG_STDOUT else self._proc.stderr
    await self._loop.connect_read_pipe(lambda: protocol, pipe)

    # Start reader
    return reader
|
def _generate_ffmpeg_cmd(
    self,
    cmd: List[str],
    input_source: Optional[str],
    output: Optional[str],
    extra_cmd: Optional[str] = None,
) -> None:
    """Generate ffmpeg command line."""
    self._argv = [self._ffmpeg]

    # Input (if any) comes right after the binary name.
    if input_source is not None:
        self._put_input(input_source)

    self._argv.extend(cmd)

    # Append any caller-supplied extra options.
    if extra_cmd is not None:
        self._argv.extend(shlex.split(extra_cmd))

    # Collapse duplicate filter flags, then add the output spec.
    # (_merge_filters may rebind self._argv, so it must run before
    # _put_output and we must not hold a stale alias across it.)
    self._merge_filters()
    self._put_output(output)
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
def _put_input(self, input_source: str) -> None:
    """Put input string to ffmpeg command."""
    tokens = shlex.split(str(input_source))
    if len(tokens) > 1:
        # The caller already provided explicit options, e.g. "-re -i url".
        self._argv.extend(tokens)
    else:
        self._argv.extend(["-i", input_source])
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
def _put_output(self, output: Optional[str]) -> None:
    """Put output string to ffmpeg command."""
    if output is None:
        # No output requested: discard everything.
        self._argv.extend(["-f", "null", "-"])
        return

    tokens = shlex.split(str(output))
    if len(tokens) > 1:
        # Caller supplied explicit output options.
        self._argv.extend(tokens)
    else:
        self._argv.append(output)
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpeg._merge_filters | python | def _merge_filters(self) -> None:
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy() | Merge all filter config in command line. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L83-L98 | null | class HAFFmpeg:
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpeg.open | python | async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None | Start a ffmpeg instance and pipe output. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L105-L142 | [
"def _generate_ffmpeg_cmd(\n self,\n cmd: List[str],\n input_source: Optional[str],\n output: Optional[str],\n extra_cmd: Optional[str] = None,\n) -> None:\n \"\"\"Generate ffmpeg command line.\"\"\"\n self._argv = [self._ffmpeg]\n\n # start command init\n if input_source is not None:\n self._put_input(input_source)\n self._argv.extend(cmd)\n\n # exists a extra cmd from customer\n if extra_cmd is not None:\n self._argv.extend(shlex.split(extra_cmd))\n\n self._merge_filters()\n self._put_output(output)\n",
"def _clear(self) -> None:\n \"\"\"Clear member variable after close.\"\"\"\n self._argv = None\n self._proc = None\n"
] | class HAFFmpeg:
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpeg.close | python | async def close(self, timeout=5) -> None:
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear() | Stop a ffmpeg instance. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L144-L166 | [
"def _clear(self) -> None:\n \"\"\"Clear member variable after close.\"\"\"\n self._argv = None\n self._proc = None\n",
"def kill(self) -> None:\n \"\"\"Kill ffmpeg job.\"\"\"\n self._proc.kill()\n self._loop.run_in_executor(None, self._proc.communicate)\n"
] | class HAFFmpeg:
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpeg.kill | python | def kill(self) -> None:
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate) | Kill ffmpeg job. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L168-L171 | null | class HAFFmpeg:
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
"""Create and return streamreader."""
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpeg.get_reader | python | async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:
reader = asyncio.StreamReader(loop=self._loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
# Attach stream
if source == FFMPEG_STDOUT:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stdout
)
else:
await self._loop.connect_read_pipe(
lambda: reader_protocol, self._proc.stderr
)
# Start reader
return reader | Create and return streamreader. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L173-L189 | null | class HAFFmpeg:
"""HA FFmpeg process async.
Object is iterable or use the process property to call from Popen object.
"""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Base initialize."""
self._loop = loop
self._ffmpeg = ffmpeg_bin
self._argv = None
self._proc = None
@property
def process(self) -> subprocess.Popen:
"""Return a Popen object or None of not running."""
return self._proc
@property
def is_running(self) -> bool:
"""Return True if ffmpeg is running."""
if self._proc is None or self._proc.returncode is not None:
return False
return True
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output)
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source])
def _put_output(self, output: Optional[str]) -> None:
"""Put output string to ffmpeg command."""
if output is None:
self._argv.extend(["-f", "null", "-"])
return
output_cmd = shlex.split(str(output))
if len(output_cmd) > 1:
self._argv.extend(output_cmd)
else:
self._argv.append(output)
def _merge_filters(self) -> None:
"""Merge all filter config in command line."""
for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
filter_list = []
new_argv = []
cmd_iter = iter(self._argv)
for element in cmd_iter:
if element in opts:
filter_list.insert(0, next(cmd_iter))
else:
new_argv.append(element)
# update argv if changes
if filter_list:
new_argv.extend([opts[0], ",".join(filter_list)])
self._argv = new_argv.copy()
def _clear(self) -> None:
"""Clear member variable after close."""
self._argv = None
self._proc = None
async def open(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str] = "-",
extra_cmd: Optional[str] = None,
stdout_pipe: bool = True,
stderr_pipe: bool = False,
) -> bool:
"""Start a ffmpeg instance and pipe output."""
stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL
stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL
if self.is_running:
_LOGGER.warning("FFmpeg is already running!")
return True
# set command line
self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)
# start ffmpeg
_LOGGER.debug("Start FFmpeg with %s", str(self._argv))
try:
proc_func = functools.partial(
subprocess.Popen,
self._argv,
bufsize=0,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr,
)
self._proc = await self._loop.run_in_executor(None, proc_func)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("FFmpeg fails %s", err)
self._clear()
return False
return self._proc is not None
async def close(self, timeout=5) -> None:
"""Stop a ffmpeg instance."""
if not self.is_running:
_LOGGER.warning("FFmpeg isn't running!")
return
# Can't use communicate because we attach the output to a streamreader
def _close():
"""Close ffmpeg."""
self._proc.stdin.write(b"q")
self._proc.wait(timeout=timeout)
# send stop to ffmpeg
try:
await self._loop.run_in_executor(None, _close)
_LOGGER.debug("Close FFmpeg process")
except (subprocess.TimeoutExpired, ValueError):
_LOGGER.warning("Timeout while waiting of FFmpeg")
self.kill()
finally:
self._clear()
def kill(self) -> None:
"""Kill ffmpeg job."""
self._proc.kill()
self._loop.run_in_executor(None, self._proc.communicate)
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpegWorker.close | python | def close(self, timeout: int = 5) -> None:
if self._read_task is not None and not self._read_task.cancelled():
self._read_task.cancel()
return super().close(timeout) | Stop a ffmpeg instance.
Return a coroutine | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L203-L211 | [
"async def close(self, timeout=5) -> None:\n \"\"\"Stop a ffmpeg instance.\"\"\"\n if not self.is_running:\n _LOGGER.warning(\"FFmpeg isn't running!\")\n return\n\n # Can't use communicate because we attach the output to a streamreader\n def _close():\n \"\"\"Close ffmpeg.\"\"\"\n self._proc.stdin.write(b\"q\")\n self._proc.wait(timeout=timeout)\n\n # send stop to ffmpeg\n try:\n await self._loop.run_in_executor(None, _close)\n _LOGGER.debug(\"Close FFmpeg process\")\n\n except (subprocess.TimeoutExpired, ValueError):\n _LOGGER.warning(\"Timeout while waiting of FFmpeg\")\n self.kill()\n\n finally:\n self._clear()\n"
] | class HAFFmpegWorker(HAFFmpeg):
"""Read FFmpeg output to que."""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Init noise sensor."""
super().__init__(ffmpeg_bin, loop)
self._que = asyncio.Queue(loop=loop)
self._input = None
self._read_task = None
async def _process_lines(self, pattern: Optional[str] = None) -> None:
"""Read line from pipe they match with pattern."""
if pattern is not None:
cmp = re.compile(pattern)
_LOGGER.debug("Start working with pattern '%s'.", pattern)
# read lines
while self.is_running:
try:
line = await self._input.readline()
if not line:
break
line = line.decode()
except Exception: # pylint: disable=broad-except
break
match = True if pattern is None else cmp.search(line)
if match:
_LOGGER.debug("Process: %s", line)
await self._que.put(line)
try:
await self._loop.run_in_executor(None, self._proc.wait)
finally:
await self._que.put(None)
_LOGGER.debug("Close read ffmpeg output.")
async def _worker_process(self) -> None:
"""Process output line."""
raise NotImplementedError()
async def start_worker(
self,
cmd: List[str],
input_source: str,
output: Optional[str] = None,
extra_cmd: Optional[str] = None,
pattern: Optional[str] = None,
reading: str = FFMPEG_STDERR,
) -> None:
"""Start ffmpeg do process data from output."""
if self.is_running:
_LOGGER.warning("Can't start worker. It is allready running!")
return
if reading == FFMPEG_STDERR:
stdout = False
stderr = True
else:
stdout = True
stderr = False
# start ffmpeg and reading to queue
await self.open(
cmd=cmd,
input_source=input_source,
output=output,
extra_cmd=extra_cmd,
stdout_pipe=stdout,
stderr_pipe=stderr,
)
self._input = await self.get_reader(reading)
# start background processing
self._read_task = self._loop.create_task(self._process_lines(pattern))
self._loop.create_task(self._worker_process())
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpegWorker._process_lines | python | async def _process_lines(self, pattern: Optional[str] = None) -> None:
if pattern is not None:
cmp = re.compile(pattern)
_LOGGER.debug("Start working with pattern '%s'.", pattern)
# read lines
while self.is_running:
try:
line = await self._input.readline()
if not line:
break
line = line.decode()
except Exception: # pylint: disable=broad-except
break
match = True if pattern is None else cmp.search(line)
if match:
_LOGGER.debug("Process: %s", line)
await self._que.put(line)
try:
await self._loop.run_in_executor(None, self._proc.wait)
finally:
await self._que.put(None)
_LOGGER.debug("Close read ffmpeg output.") | Read line from pipe they match with pattern. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L213-L239 | null | class HAFFmpegWorker(HAFFmpeg):
"""Read FFmpeg output to que."""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Init noise sensor."""
super().__init__(ffmpeg_bin, loop)
self._que = asyncio.Queue(loop=loop)
self._input = None
self._read_task = None
def close(self, timeout: int = 5) -> None:
"""Stop a ffmpeg instance.
Return a coroutine
"""
if self._read_task is not None and not self._read_task.cancelled():
self._read_task.cancel()
return super().close(timeout)
async def _worker_process(self) -> None:
"""Process output line."""
raise NotImplementedError()
async def start_worker(
self,
cmd: List[str],
input_source: str,
output: Optional[str] = None,
extra_cmd: Optional[str] = None,
pattern: Optional[str] = None,
reading: str = FFMPEG_STDERR,
) -> None:
"""Start ffmpeg do process data from output."""
if self.is_running:
_LOGGER.warning("Can't start worker. It is allready running!")
return
if reading == FFMPEG_STDERR:
stdout = False
stderr = True
else:
stdout = True
stderr = False
# start ffmpeg and reading to queue
await self.open(
cmd=cmd,
input_source=input_source,
output=output,
extra_cmd=extra_cmd,
stdout_pipe=stdout,
stderr_pipe=stderr,
)
self._input = await self.get_reader(reading)
# start background processing
self._read_task = self._loop.create_task(self._process_lines(pattern))
self._loop.create_task(self._worker_process())
|
pvizeli/ha-ffmpeg | haffmpeg/core.py | HAFFmpegWorker.start_worker | python | async def start_worker(
self,
cmd: List[str],
input_source: str,
output: Optional[str] = None,
extra_cmd: Optional[str] = None,
pattern: Optional[str] = None,
reading: str = FFMPEG_STDERR,
) -> None:
if self.is_running:
_LOGGER.warning("Can't start worker. It is allready running!")
return
if reading == FFMPEG_STDERR:
stdout = False
stderr = True
else:
stdout = True
stderr = False
# start ffmpeg and reading to queue
await self.open(
cmd=cmd,
input_source=input_source,
output=output,
extra_cmd=extra_cmd,
stdout_pipe=stdout,
stderr_pipe=stderr,
)
self._input = await self.get_reader(reading)
# start background processing
self._read_task = self._loop.create_task(self._process_lines(pattern))
self._loop.create_task(self._worker_process()) | Start ffmpeg do process data from output. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/core.py#L245-L280 | [
"async def open(\n self,\n cmd: List[str],\n input_source: Optional[str],\n output: Optional[str] = \"-\",\n extra_cmd: Optional[str] = None,\n stdout_pipe: bool = True,\n stderr_pipe: bool = False,\n) -> bool:\n \"\"\"Start a ffmpeg instance and pipe output.\"\"\"\n stdout = subprocess.PIPE if stdout_pipe else subprocess.DEVNULL\n stderr = subprocess.PIPE if stderr_pipe else subprocess.DEVNULL\n\n if self.is_running:\n _LOGGER.warning(\"FFmpeg is already running!\")\n return True\n\n # set command line\n self._generate_ffmpeg_cmd(cmd, input_source, output, extra_cmd)\n\n # start ffmpeg\n _LOGGER.debug(\"Start FFmpeg with %s\", str(self._argv))\n try:\n proc_func = functools.partial(\n subprocess.Popen,\n self._argv,\n bufsize=0,\n stdin=subprocess.PIPE,\n stdout=stdout,\n stderr=stderr,\n )\n self._proc = await self._loop.run_in_executor(None, proc_func)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.exception(\"FFmpeg fails %s\", err)\n self._clear()\n return False\n\n return self._proc is not None\n",
"async def get_reader(self, source=FFMPEG_STDOUT) -> asyncio.StreamReader:\n \"\"\"Create and return streamreader.\"\"\"\n reader = asyncio.StreamReader(loop=self._loop)\n reader_protocol = asyncio.StreamReaderProtocol(reader)\n\n # Attach stream\n if source == FFMPEG_STDOUT:\n await self._loop.connect_read_pipe(\n lambda: reader_protocol, self._proc.stdout\n )\n else:\n await self._loop.connect_read_pipe(\n lambda: reader_protocol, self._proc.stderr\n )\n\n # Start reader\n return reader\n",
"async def _process_lines(self, pattern: Optional[str] = None) -> None:\n \"\"\"Read line from pipe they match with pattern.\"\"\"\n if pattern is not None:\n cmp = re.compile(pattern)\n\n _LOGGER.debug(\"Start working with pattern '%s'.\", pattern)\n\n # read lines\n while self.is_running:\n try:\n line = await self._input.readline()\n if not line:\n break\n line = line.decode()\n except Exception: # pylint: disable=broad-except\n break\n\n match = True if pattern is None else cmp.search(line)\n if match:\n _LOGGER.debug(\"Process: %s\", line)\n await self._que.put(line)\n\n try:\n await self._loop.run_in_executor(None, self._proc.wait)\n finally:\n await self._que.put(None)\n _LOGGER.debug(\"Close read ffmpeg output.\")\n",
"async def _worker_process(self) -> None:\n \"\"\"Process output line.\"\"\"\n raise NotImplementedError()\n",
"async def _worker_process(self) -> None:\n \"\"\"This function processing data.\"\"\"\n state = self.STATE_DETECT\n timeout = self._time_duration\n\n self._loop.call_soon(self._callback, False)\n\n re_start = re.compile(\"silence_start\")\n re_end = re.compile(\"silence_end\")\n\n # process queue data\n while True:\n try:\n _LOGGER.debug(\"Reading State: %d, timeout: %s\", state, timeout)\n with async_timeout.timeout(timeout, loop=self._loop):\n data = await self._que.get()\n timeout = None\n if data is None:\n self._loop.call_soon(self._callback, None)\n return\n except asyncio.TimeoutError:\n _LOGGER.debug(\"Blocking timeout\")\n # noise\n if state == self.STATE_DETECT:\n # noise detected\n self._loop.call_soon(self._callback, True)\n state = self.STATE_NOISE\n\n elif state == self.STATE_END:\n # no noise\n self._loop.call_soon(self._callback, False)\n state = self.STATE_NONE\n\n timeout = None\n continue\n\n if re_start.search(data):\n if state == self.STATE_NOISE:\n # stop noise detection\n state = self.STATE_END\n timeout = self._time_reset\n elif state == self.STATE_DETECT:\n # reset if only a peak\n state = self.STATE_NONE\n continue\n\n if re_end.search(data):\n if state == self.STATE_NONE:\n # detect noise begin\n state = self.STATE_DETECT\n timeout = self._time_duration\n elif state == self.STATE_END:\n # back to noise status\n state = self.STATE_NOISE\n continue\n\n _LOGGER.warning(\"Unknown data from queue!\")\n",
"async def _worker_process(self) -> None:\n \"\"\"This function processing data.\"\"\"\n state = self.STATE_NONE\n timeout = None\n\n self._loop.call_soon(self._callback, False)\n\n # for repeat feature\n re_frame = 0\n re_time = 0\n\n re_data = re.compile(self.MATCH)\n\n # process queue data\n while True:\n try:\n _LOGGER.debug(\"Reading State: %d, timeout: %s\", state, timeout)\n with async_timeout.timeout(timeout, loop=self._loop):\n data = await self._que.get()\n if data is None:\n self._loop.call_soon(self._callback, None)\n return\n except asyncio.TimeoutError:\n _LOGGER.debug(\"Blocking timeout\")\n # reset motion detection\n if state == self.STATE_MOTION:\n state = self.STATE_NONE\n self._loop.call_soon(self._callback, False)\n timeout = None\n # reset repeate state\n if state == self.STATE_REPEAT:\n state = self.STATE_NONE\n timeout = None\n continue\n\n frames = re_data.search(data)\n if frames:\n # repeat not used\n if self._repeat == 0 and state == self.STATE_NONE:\n state = self.STATE_MOTION\n self._loop.call_soon(self._callback, True)\n timeout = self._time_reset\n\n # repeat feature is on / first motion\n if state == self.STATE_NONE:\n state = self.STATE_REPEAT\n timeout = self._time_repeat\n re_frame = 0\n re_time = time()\n\n elif state == self.STATE_REPEAT:\n re_frame += 1\n\n # REPEAT ready?\n if re_frame >= self._repeat:\n state = self.STATE_MOTION\n self._loop.call_soon(self._callback, True)\n timeout = self._time_reset\n else:\n past = time() - re_time\n timeout -= past\n\n # REPEAT time down\n if timeout <= 0:\n _LOGGER.debug(\"Reset repeat to none\")\n state = self.STATE_NONE\n timeout = None\n\n continue\n\n _LOGGER.warning(\"Unknown data from queue!\")\n"
] | class HAFFmpegWorker(HAFFmpeg):
"""Read FFmpeg output to que."""
def __init__(self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop):
"""Init noise sensor."""
super().__init__(ffmpeg_bin, loop)
self._que = asyncio.Queue(loop=loop)
self._input = None
self._read_task = None
def close(self, timeout: int = 5) -> None:
"""Stop a ffmpeg instance.
Return a coroutine
"""
if self._read_task is not None and not self._read_task.cancelled():
self._read_task.cancel()
return super().close(timeout)
async def _process_lines(self, pattern: Optional[str] = None) -> None:
"""Read line from pipe they match with pattern."""
if pattern is not None:
cmp = re.compile(pattern)
_LOGGER.debug("Start working with pattern '%s'.", pattern)
# read lines
while self.is_running:
try:
line = await self._input.readline()
if not line:
break
line = line.decode()
except Exception: # pylint: disable=broad-except
break
match = True if pattern is None else cmp.search(line)
if match:
_LOGGER.debug("Process: %s", line)
await self._que.put(line)
try:
await self._loop.run_in_executor(None, self._proc.wait)
finally:
await self._que.put(None)
_LOGGER.debug("Close read ffmpeg output.")
async def _worker_process(self) -> None:
"""Process output line."""
raise NotImplementedError()
|
pvizeli/ha-ffmpeg | haffmpeg/sensor.py | SensorNoise.set_options | python | def set_options(
self, time_duration: int = 1, time_reset: int = 2, peak: int = -30
) -> None:
self._time_duration = time_duration
self._time_reset = time_reset
self._peak = peak | Set option parameter for noise sensor. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/sensor.py#L34-L40 | null | class SensorNoise(HAFFmpegWorker):
"""Implement a noise detection on a autio stream."""
STATE_NONE = 0
STATE_NOISE = 1
STATE_END = 2
STATE_DETECT = 3
def __init__(
self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop, callback: Callable
):
"""Init noise sensor."""
super().__init__(ffmpeg_bin, loop)
self._callback = callback
self._peak = -30
self._time_duration = 1
self._time_reset = 2
def open_sensor(
self,
input_source: str,
output_dest: Optional[str] = None,
extra_cmd: Optional[str] = None,
) -> Coroutine:
"""Open FFmpeg process for read autio stream.
Return a coroutine.
"""
command = ["-vn", "-filter:a", "silencedetect=n={}dB:d=1".format(self._peak)]
# run ffmpeg, read output
return self.start_worker(
cmd=command,
input_source=input_source,
output=output_dest,
extra_cmd=extra_cmd,
pattern="silence",
)
async def _worker_process(self) -> None:
"""This function processing data."""
state = self.STATE_DETECT
timeout = self._time_duration
self._loop.call_soon(self._callback, False)
re_start = re.compile("silence_start")
re_end = re.compile("silence_end")
# process queue data
while True:
try:
_LOGGER.debug("Reading State: %d, timeout: %s", state, timeout)
with async_timeout.timeout(timeout, loop=self._loop):
data = await self._que.get()
timeout = None
if data is None:
self._loop.call_soon(self._callback, None)
return
except asyncio.TimeoutError:
_LOGGER.debug("Blocking timeout")
# noise
if state == self.STATE_DETECT:
# noise detected
self._loop.call_soon(self._callback, True)
state = self.STATE_NOISE
elif state == self.STATE_END:
# no noise
self._loop.call_soon(self._callback, False)
state = self.STATE_NONE
timeout = None
continue
if re_start.search(data):
if state == self.STATE_NOISE:
# stop noise detection
state = self.STATE_END
timeout = self._time_reset
elif state == self.STATE_DETECT:
# reset if only a peak
state = self.STATE_NONE
continue
if re_end.search(data):
if state == self.STATE_NONE:
# detect noise begin
state = self.STATE_DETECT
timeout = self._time_duration
elif state == self.STATE_END:
# back to noise status
state = self.STATE_NOISE
continue
_LOGGER.warning("Unknown data from queue!")
|
pvizeli/ha-ffmpeg | haffmpeg/sensor.py | SensorNoise.open_sensor | python | def open_sensor(
self,
input_source: str,
output_dest: Optional[str] = None,
extra_cmd: Optional[str] = None,
) -> Coroutine:
command = ["-vn", "-filter:a", "silencedetect=n={}dB:d=1".format(self._peak)]
# run ffmpeg, read output
return self.start_worker(
cmd=command,
input_source=input_source,
output=output_dest,
extra_cmd=extra_cmd,
pattern="silence",
) | Open FFmpeg process for read autio stream.
Return a coroutine. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/sensor.py#L42-L61 | [
"async def start_worker(\n self,\n cmd: List[str],\n input_source: str,\n output: Optional[str] = None,\n extra_cmd: Optional[str] = None,\n pattern: Optional[str] = None,\n reading: str = FFMPEG_STDERR,\n) -> None:\n \"\"\"Start ffmpeg do process data from output.\"\"\"\n if self.is_running:\n _LOGGER.warning(\"Can't start worker. It is allready running!\")\n return\n\n if reading == FFMPEG_STDERR:\n stdout = False\n stderr = True\n else:\n stdout = True\n stderr = False\n\n # start ffmpeg and reading to queue\n await self.open(\n cmd=cmd,\n input_source=input_source,\n output=output,\n extra_cmd=extra_cmd,\n stdout_pipe=stdout,\n stderr_pipe=stderr,\n )\n\n self._input = await self.get_reader(reading)\n\n # start background processing\n self._read_task = self._loop.create_task(self._process_lines(pattern))\n self._loop.create_task(self._worker_process())\n"
] | class SensorNoise(HAFFmpegWorker):
"""Implement a noise detection on a autio stream."""
STATE_NONE = 0
STATE_NOISE = 1
STATE_END = 2
STATE_DETECT = 3
def __init__(
self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop, callback: Callable
):
"""Init noise sensor."""
super().__init__(ffmpeg_bin, loop)
self._callback = callback
self._peak = -30
self._time_duration = 1
self._time_reset = 2
def set_options(
self, time_duration: int = 1, time_reset: int = 2, peak: int = -30
) -> None:
"""Set option parameter for noise sensor."""
self._time_duration = time_duration
self._time_reset = time_reset
self._peak = peak
async def _worker_process(self) -> None:
"""This function processing data."""
state = self.STATE_DETECT
timeout = self._time_duration
self._loop.call_soon(self._callback, False)
re_start = re.compile("silence_start")
re_end = re.compile("silence_end")
# process queue data
while True:
try:
_LOGGER.debug("Reading State: %d, timeout: %s", state, timeout)
with async_timeout.timeout(timeout, loop=self._loop):
data = await self._que.get()
timeout = None
if data is None:
self._loop.call_soon(self._callback, None)
return
except asyncio.TimeoutError:
_LOGGER.debug("Blocking timeout")
# noise
if state == self.STATE_DETECT:
# noise detected
self._loop.call_soon(self._callback, True)
state = self.STATE_NOISE
elif state == self.STATE_END:
# no noise
self._loop.call_soon(self._callback, False)
state = self.STATE_NONE
timeout = None
continue
if re_start.search(data):
if state == self.STATE_NOISE:
# stop noise detection
state = self.STATE_END
timeout = self._time_reset
elif state == self.STATE_DETECT:
# reset if only a peak
state = self.STATE_NONE
continue
if re_end.search(data):
if state == self.STATE_NONE:
# detect noise begin
state = self.STATE_DETECT
timeout = self._time_duration
elif state == self.STATE_END:
# back to noise status
state = self.STATE_NOISE
continue
_LOGGER.warning("Unknown data from queue!")
|
pvizeli/ha-ffmpeg | haffmpeg/sensor.py | SensorNoise._worker_process | python | async def _worker_process(self) -> None:
state = self.STATE_DETECT
timeout = self._time_duration
self._loop.call_soon(self._callback, False)
re_start = re.compile("silence_start")
re_end = re.compile("silence_end")
# process queue data
while True:
try:
_LOGGER.debug("Reading State: %d, timeout: %s", state, timeout)
with async_timeout.timeout(timeout, loop=self._loop):
data = await self._que.get()
timeout = None
if data is None:
self._loop.call_soon(self._callback, None)
return
except asyncio.TimeoutError:
_LOGGER.debug("Blocking timeout")
# noise
if state == self.STATE_DETECT:
# noise detected
self._loop.call_soon(self._callback, True)
state = self.STATE_NOISE
elif state == self.STATE_END:
# no noise
self._loop.call_soon(self._callback, False)
state = self.STATE_NONE
timeout = None
continue
if re_start.search(data):
if state == self.STATE_NOISE:
# stop noise detection
state = self.STATE_END
timeout = self._time_reset
elif state == self.STATE_DETECT:
# reset if only a peak
state = self.STATE_NONE
continue
if re_end.search(data):
if state == self.STATE_NONE:
# detect noise begin
state = self.STATE_DETECT
timeout = self._time_duration
elif state == self.STATE_END:
# back to noise status
state = self.STATE_NOISE
continue
_LOGGER.warning("Unknown data from queue!") | This function processing data. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/sensor.py#L63-L119 | null | class SensorNoise(HAFFmpegWorker):
"""Implement a noise detection on a autio stream."""
STATE_NONE = 0
STATE_NOISE = 1
STATE_END = 2
STATE_DETECT = 3
def __init__(
self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop, callback: Callable
):
"""Init noise sensor."""
super().__init__(ffmpeg_bin, loop)
self._callback = callback
self._peak = -30
self._time_duration = 1
self._time_reset = 2
def set_options(
self, time_duration: int = 1, time_reset: int = 2, peak: int = -30
) -> None:
"""Set option parameter for noise sensor."""
self._time_duration = time_duration
self._time_reset = time_reset
self._peak = peak
def open_sensor(
self,
input_source: str,
output_dest: Optional[str] = None,
extra_cmd: Optional[str] = None,
) -> Coroutine:
"""Open FFmpeg process for read autio stream.
Return a coroutine.
"""
command = ["-vn", "-filter:a", "silencedetect=n={}dB:d=1".format(self._peak)]
# run ffmpeg, read output
return self.start_worker(
cmd=command,
input_source=input_source,
output=output_dest,
extra_cmd=extra_cmd,
pattern="silence",
)
|
pvizeli/ha-ffmpeg | haffmpeg/sensor.py | SensorMotion.set_options | python | def set_options(
self,
time_reset: int = 60,
time_repeat: int = 0,
repeat: int = 0,
changes: int = 10,
) -> None:
self._time_reset = time_reset
self._time_repeat = time_repeat
self._repeat = repeat
self._changes = changes | Set option parameter for noise sensor. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/sensor.py#L143-L154 | null | class SensorMotion(HAFFmpegWorker):
"""Implement motion detection with ffmpeg scene detection."""
STATE_NONE = 0
STATE_REPEAT = 1
STATE_MOTION = 2
MATCH = r"\d,.*\d,.*\d,.*\d,.*\d,.*\w"
def __init__(
self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop, callback: Callable
):
"""Init motion sensor."""
super().__init__(ffmpeg_bin, loop)
self._callback = callback
self._changes = 10
self._time_reset = 60
self._time_repeat = 0
self._repeat = 0
def open_sensor(
self, input_source: str, extra_cmd: Optional[str] = None
) -> Coroutine:
"""Open FFmpeg process a video stream for motion detection.
Return a coroutine.
"""
command = [
"-an",
"-filter:v",
"select=gt(scene\\,{0})".format(self._changes / 100),
]
# run ffmpeg, read output
return self.start_worker(
cmd=command,
input_source=input_source,
output="-f framemd5 -",
extra_cmd=extra_cmd,
pattern=self.MATCH,
reading=FFMPEG_STDOUT,
)
async def _worker_process(self) -> None:
"""This function processing data."""
state = self.STATE_NONE
timeout = None
self._loop.call_soon(self._callback, False)
# for repeat feature
re_frame = 0
re_time = 0
re_data = re.compile(self.MATCH)
# process queue data
while True:
try:
_LOGGER.debug("Reading State: %d, timeout: %s", state, timeout)
with async_timeout.timeout(timeout, loop=self._loop):
data = await self._que.get()
if data is None:
self._loop.call_soon(self._callback, None)
return
except asyncio.TimeoutError:
_LOGGER.debug("Blocking timeout")
# reset motion detection
if state == self.STATE_MOTION:
state = self.STATE_NONE
self._loop.call_soon(self._callback, False)
timeout = None
# reset repeate state
if state == self.STATE_REPEAT:
state = self.STATE_NONE
timeout = None
continue
frames = re_data.search(data)
if frames:
# repeat not used
if self._repeat == 0 and state == self.STATE_NONE:
state = self.STATE_MOTION
self._loop.call_soon(self._callback, True)
timeout = self._time_reset
# repeat feature is on / first motion
if state == self.STATE_NONE:
state = self.STATE_REPEAT
timeout = self._time_repeat
re_frame = 0
re_time = time()
elif state == self.STATE_REPEAT:
re_frame += 1
# REPEAT ready?
if re_frame >= self._repeat:
state = self.STATE_MOTION
self._loop.call_soon(self._callback, True)
timeout = self._time_reset
else:
past = time() - re_time
timeout -= past
# REPEAT time down
if timeout <= 0:
_LOGGER.debug("Reset repeat to none")
state = self.STATE_NONE
timeout = None
continue
_LOGGER.warning("Unknown data from queue!")
|
pvizeli/ha-ffmpeg | haffmpeg/sensor.py | SensorMotion.open_sensor | python | def open_sensor(
self, input_source: str, extra_cmd: Optional[str] = None
) -> Coroutine:
command = [
"-an",
"-filter:v",
"select=gt(scene\\,{0})".format(self._changes / 100),
]
# run ffmpeg, read output
return self.start_worker(
cmd=command,
input_source=input_source,
output="-f framemd5 -",
extra_cmd=extra_cmd,
pattern=self.MATCH,
reading=FFMPEG_STDOUT,
) | Open FFmpeg process a video stream for motion detection.
Return a coroutine. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/sensor.py#L156-L177 | [
"async def start_worker(\n self,\n cmd: List[str],\n input_source: str,\n output: Optional[str] = None,\n extra_cmd: Optional[str] = None,\n pattern: Optional[str] = None,\n reading: str = FFMPEG_STDERR,\n) -> None:\n \"\"\"Start ffmpeg do process data from output.\"\"\"\n if self.is_running:\n _LOGGER.warning(\"Can't start worker. It is allready running!\")\n return\n\n if reading == FFMPEG_STDERR:\n stdout = False\n stderr = True\n else:\n stdout = True\n stderr = False\n\n # start ffmpeg and reading to queue\n await self.open(\n cmd=cmd,\n input_source=input_source,\n output=output,\n extra_cmd=extra_cmd,\n stdout_pipe=stdout,\n stderr_pipe=stderr,\n )\n\n self._input = await self.get_reader(reading)\n\n # start background processing\n self._read_task = self._loop.create_task(self._process_lines(pattern))\n self._loop.create_task(self._worker_process())\n"
] | class SensorMotion(HAFFmpegWorker):
"""Implement motion detection with ffmpeg scene detection."""
STATE_NONE = 0
STATE_REPEAT = 1
STATE_MOTION = 2
MATCH = r"\d,.*\d,.*\d,.*\d,.*\d,.*\w"
def __init__(
self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop, callback: Callable
):
"""Init motion sensor."""
super().__init__(ffmpeg_bin, loop)
self._callback = callback
self._changes = 10
self._time_reset = 60
self._time_repeat = 0
self._repeat = 0
def set_options(
self,
time_reset: int = 60,
time_repeat: int = 0,
repeat: int = 0,
changes: int = 10,
) -> None:
"""Set option parameter for noise sensor."""
self._time_reset = time_reset
self._time_repeat = time_repeat
self._repeat = repeat
self._changes = changes
async def _worker_process(self) -> None:
"""This function processing data."""
state = self.STATE_NONE
timeout = None
self._loop.call_soon(self._callback, False)
# for repeat feature
re_frame = 0
re_time = 0
re_data = re.compile(self.MATCH)
# process queue data
while True:
try:
_LOGGER.debug("Reading State: %d, timeout: %s", state, timeout)
with async_timeout.timeout(timeout, loop=self._loop):
data = await self._que.get()
if data is None:
self._loop.call_soon(self._callback, None)
return
except asyncio.TimeoutError:
_LOGGER.debug("Blocking timeout")
# reset motion detection
if state == self.STATE_MOTION:
state = self.STATE_NONE
self._loop.call_soon(self._callback, False)
timeout = None
# reset repeate state
if state == self.STATE_REPEAT:
state = self.STATE_NONE
timeout = None
continue
frames = re_data.search(data)
if frames:
# repeat not used
if self._repeat == 0 and state == self.STATE_NONE:
state = self.STATE_MOTION
self._loop.call_soon(self._callback, True)
timeout = self._time_reset
# repeat feature is on / first motion
if state == self.STATE_NONE:
state = self.STATE_REPEAT
timeout = self._time_repeat
re_frame = 0
re_time = time()
elif state == self.STATE_REPEAT:
re_frame += 1
# REPEAT ready?
if re_frame >= self._repeat:
state = self.STATE_MOTION
self._loop.call_soon(self._callback, True)
timeout = self._time_reset
else:
past = time() - re_time
timeout -= past
# REPEAT time down
if timeout <= 0:
_LOGGER.debug("Reset repeat to none")
state = self.STATE_NONE
timeout = None
continue
_LOGGER.warning("Unknown data from queue!")
|
pvizeli/ha-ffmpeg | haffmpeg/sensor.py | SensorMotion._worker_process | python | async def _worker_process(self) -> None:
state = self.STATE_NONE
timeout = None
self._loop.call_soon(self._callback, False)
# for repeat feature
re_frame = 0
re_time = 0
re_data = re.compile(self.MATCH)
# process queue data
while True:
try:
_LOGGER.debug("Reading State: %d, timeout: %s", state, timeout)
with async_timeout.timeout(timeout, loop=self._loop):
data = await self._que.get()
if data is None:
self._loop.call_soon(self._callback, None)
return
except asyncio.TimeoutError:
_LOGGER.debug("Blocking timeout")
# reset motion detection
if state == self.STATE_MOTION:
state = self.STATE_NONE
self._loop.call_soon(self._callback, False)
timeout = None
# reset repeate state
if state == self.STATE_REPEAT:
state = self.STATE_NONE
timeout = None
continue
frames = re_data.search(data)
if frames:
# repeat not used
if self._repeat == 0 and state == self.STATE_NONE:
state = self.STATE_MOTION
self._loop.call_soon(self._callback, True)
timeout = self._time_reset
# repeat feature is on / first motion
if state == self.STATE_NONE:
state = self.STATE_REPEAT
timeout = self._time_repeat
re_frame = 0
re_time = time()
elif state == self.STATE_REPEAT:
re_frame += 1
# REPEAT ready?
if re_frame >= self._repeat:
state = self.STATE_MOTION
self._loop.call_soon(self._callback, True)
timeout = self._time_reset
else:
past = time() - re_time
timeout -= past
# REPEAT time down
if timeout <= 0:
_LOGGER.debug("Reset repeat to none")
state = self.STATE_NONE
timeout = None
continue
_LOGGER.warning("Unknown data from queue!") | This function processing data. | train | https://github.com/pvizeli/ha-ffmpeg/blob/fce1d4b77e76b9cb07d814bcb858b89657e1f32b/haffmpeg/sensor.py#L179-L249 | null | class SensorMotion(HAFFmpegWorker):
"""Implement motion detection with ffmpeg scene detection."""
STATE_NONE = 0
STATE_REPEAT = 1
STATE_MOTION = 2
MATCH = r"\d,.*\d,.*\d,.*\d,.*\d,.*\w"
def __init__(
self, ffmpeg_bin: str, loop: asyncio.BaseEventLoop, callback: Callable
):
"""Init motion sensor."""
super().__init__(ffmpeg_bin, loop)
self._callback = callback
self._changes = 10
self._time_reset = 60
self._time_repeat = 0
self._repeat = 0
def set_options(
self,
time_reset: int = 60,
time_repeat: int = 0,
repeat: int = 0,
changes: int = 10,
) -> None:
"""Set option parameter for noise sensor."""
self._time_reset = time_reset
self._time_repeat = time_repeat
self._repeat = repeat
self._changes = changes
def open_sensor(
self, input_source: str, extra_cmd: Optional[str] = None
) -> Coroutine:
"""Open FFmpeg process a video stream for motion detection.
Return a coroutine.
"""
command = [
"-an",
"-filter:v",
"select=gt(scene\\,{0})".format(self._changes / 100),
]
# run ffmpeg, read output
return self.start_worker(
cmd=command,
input_source=input_source,
output="-f framemd5 -",
extra_cmd=extra_cmd,
pattern=self.MATCH,
reading=FFMPEG_STDOUT,
)
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.lookup_full_hashes | python | def lookup_full_hashes(self, hash_values):
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output | Query DB to see if hash is blacklisted | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L172-L185 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.lookup_hash_prefix | python | def lookup_hash_prefix(self, cues):
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output | Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired). | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L187-L201 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.store_full_hash | python | def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters) | Store full hash found for the given hash prefix | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L203-L222 | [
"def to_hex_3(v):\n return binascii.hexlify(v)\n"
] | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.cleanup_full_hashes | python | def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for))) | Remove long expired full_hash entries. | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L232-L238 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.get_threat_lists | python | def get_threat_lists(self):
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output | Get a list of known threat lists. | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L247-L257 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.get_client_state | python | def get_client_state(self):
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output | Get a dict of known threat lists including clientState values. | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L259-L269 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.add_threat_list | python | def add_threat_list(self, threat_list):
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params) | Add threat list entry if it does not exist. | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L271-L280 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.delete_threat_list | python | def delete_threat_list(self, threat_list):
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params) | Delete threat list entry. | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L282-L290 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def hash_prefix_list_checksum(self, threat_list):
"""Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes"""
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
afilipovich/gglsbl | gglsbl/storage.py | SqliteStorage.hash_prefix_list_checksum | python | def hash_prefix_list_checksum(self, threat_list):
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum | Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes | train | https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L300-L311 | null | class SqliteStorage(object):
"""Storage abstraction for local GSB cache."""
schema_version = '1.1'
def __init__(self, db_path, timeout=10):
"""Constructor.
:param db_path: path to Sqlite DB file
:timeout: Sqlite lock wait timeout in seconds
"""
self.db_path = db_path
do_init_db = not os.path.isfile(db_path)
log.info('Opening SQLite DB {}'.format(db_path))
self.db = sqlite3.connect(db_path, timeout)
if do_init_db:
log.info('SQLite DB does not exist, initializing')
self.init_db()
if not self.check_schema_version():
log.warning("Cache schema is not compatible with this library version. Re-creating sqlite DB %s", db_path)
self.db.close()
os.unlink(db_path)
self.db = sqlite3.connect(db_path, timeout)
self.init_db()
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
def check_schema_version(self):
q = "SELECT value FROM metadata WHERE name='schema_version'"
v = None
with self.get_cursor() as dbc:
try:
dbc.execute(q)
v = dbc.fetchall()[0][0]
except sqlite3.OperationalError:
log.error('Can not get schema version, it is probably outdated.')
return False
self.db.rollback() # prevent dangling transaction while instance is idle after init
return v == self.schema_version
@contextlib.contextmanager
def get_cursor(self):
dbc = self.db.cursor()
try:
yield dbc
finally:
dbc.close()
def init_db(self):
self.db.cursor().execute('PRAGMA synchronous = 0')
self.db.cursor().execute('PRAGMA journal_mode = WAL')
with self.get_cursor() as dbc:
dbc.execute(
"""CREATE TABLE metadata (
name character varying(128) NOT NULL PRIMARY KEY,
value character varying(128) NOT NULL
)"""
)
dbc.execute(
"""INSERT INTO metadata (name, value) VALUES ('schema_version', '{}')""".format(self.schema_version)
)
dbc.execute(
"""CREATE TABLE threat_list (
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
client_state character varying(42),
timestamp timestamp without time zone DEFAULT current_timestamp,
PRIMARY KEY (threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE full_hash (
value BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
downloaded_at timestamp without time zone DEFAULT current_timestamp,
expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
malware_threat_type varchar(32),
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type)
)"""
)
dbc.execute(
"""CREATE TABLE hash_prefix (
value BLOB NOT NULL,
cue BLOB NOT NULL,
threat_type character varying(128) NOT NULL,
platform_type character varying(128) NOT NULL,
threat_entry_type character varying(128) NOT NULL,
timestamp timestamp without time zone DEFAULT current_timestamp,
negative_expires_at timestamp without time zone NOT NULL DEFAULT current_timestamp,
PRIMARY KEY (value, threat_type, platform_type, threat_entry_type),
FOREIGN KEY(threat_type, platform_type, threat_entry_type)
REFERENCES threat_list(threat_type, platform_type, threat_entry_type)
ON DELETE CASCADE
)
"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_cue ON hash_prefix (cue)"""
)
dbc.execute(
"""CREATE INDEX idx_hash_prefix_list ON hash_prefix (threat_type, platform_type, threat_entry_type)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_expires_at ON full_hash (expires_at)"""
)
dbc.execute(
"""CREATE INDEX idx_full_hash_value ON full_hash (value)"""
)
self.db.commit()
def lookup_full_hashes(self, hash_values):
"""Query DB to see if hash is blacklisted"""
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
def lookup_hash_prefix(self, cues):
"""Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
"""
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type):
"""Store full hash found for the given hash prefix"""
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
def delete_hash_prefix_list(self, threat_list):
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
parameters = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, parameters)
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12)):
"""Remove long expired full_hash entries."""
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
def update_hash_prefix_expiration(self, prefix_value, negative_cache_duration):
q = """UPDATE hash_prefix SET negative_expires_at=datetime(current_timestamp, '+{} SECONDS')
WHERE value=?"""
parameters = [sqlite3.Binary(prefix_value)]
with self.get_cursor() as dbc:
dbc.execute(q.format(int(negative_cache_duration)), parameters)
def get_threat_lists(self):
"""Get a list of known threat lists."""
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
def get_client_state(self):
"""Get a dict of known threat lists including clientState values."""
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
def add_threat_list(self, threat_list):
"""Add threat list entry if it does not exist."""
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def delete_threat_list(self, threat_list):
"""Delete threat list entry."""
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
def update_threat_list_client_state(self, threat_list, client_state):
log.info('Setting client_state in Sqlite')
q = '''UPDATE threat_list SET timestamp=current_timestamp, client_state=?
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?'''
with self.get_cursor() as dbc:
params = [client_state, threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
dbc.execute(q, params)
def populate_hash_prefix_list(self, threat_list, hash_prefix_list):
log.info('Storing {} entries of hash prefix list {}'.format(len(hash_prefix_list), str(threat_list)))
q = '''INSERT INTO hash_prefix
(value, cue, threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
with self.get_cursor() as dbc:
records = [[sqlite3.Binary(prefix_value), sqlite3.Binary(prefix_value[0:4]), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type] for prefix_value in hash_prefix_list]
dbc.executemany(q, records)
def get_hash_prefix_values_to_remove(self, threat_list, indices):
log.info('Removing {} records from threat list "{}"'.format(len(indices), str(threat_list)))
indices = set(indices)
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
values_to_remove = []
with self.get_cursor() as dbc:
dbc.execute(q, params)
i = 0
for h in dbc.fetchall():
v = bytes(h[0])
if i in indices:
values_to_remove.append(v)
i += 1
return values_to_remove
def remove_hash_prefix_indices(self, threat_list, indices):
"""Remove records matching idices from a lexicographically-sorted local threat list."""
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
def dump_hash_prefix_values(self):
"""Export all hash prefix values.
Returns a list of known hash prefix values
"""
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
def rollback(self):
log.info('Rolling back DB transaction.')
self.db.rollback()
def commit(self):
self.db.commit()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.