text stringlengths 0 1.05M | meta dict |
|---|---|
"""A Python module for interacting and consuming responses from Slack."""
import logging
import slack_sdk.errors as e
from .internal_utils import _next_cursor_is_present
class SlackResponse:
    """An iterable container for a Slack Web API response.

    Wraps the decoded JSON payload together with the HTTP status code and
    headers, and supports Slack's cursor-based pagination: iterating over
    this object re-fetches the next page for as long as a ``next_cursor``
    is present in the response metadata.

    Example:
        ```python
        import os
        import slack
        client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])
        response1 = client.auth_revoke(test='true')
        assert not response1['revoked']
        response2 = client.auth_test()
        assert response2.get('ok', False)
        users = []
        for page in client.users_list(limit=2):
            users = users + page['members']
        ```

    Note:
        Attributes and methods with a leading underscore are internal and
        may change or be removed at any time.
    """

    def __init__(
        self,
        *,
        client,
        http_verb: str,
        api_url: str,
        req_args: dict,
        data: dict,
        headers: dict,
        status_code: int,
    ):
        # Details of the request that produced this response.
        self.http_verb = http_verb
        self.api_url = api_url
        self.req_args = req_args
        # Decoded payload plus HTTP-level details.
        self.data = data
        self.headers = headers
        self.status_code = status_code
        # Pagination bookkeeping: remember the first page so __iter__ can rewind.
        self._initial_data = data
        self._iteration = None  # set by __iter__, advanced by __next__
        self._client = client
        self._logger = logging.getLogger(__name__)

    def __str__(self):
        """Render the response data when the object is converted to a string."""
        return f"{self.data}"

    def __getitem__(self, key):
        """Allow dict-style access, e.g. ``response["ok"]``.

        Returns:
            The value stored under ``key`` in the response data, or None.
        """
        return self.data.get(key, None)

    def __iter__(self):
        """Start (or restart) iteration from the first page of results.

        Returns:
            (SlackResponse) self
        """
        self._iteration = 0
        self.data = self._initial_data
        return self

    def __next__(self):
        """Fetch the next page of results while ``next_cursor`` is present.

        Returns:
            (SlackResponse) self, with the newly fetched page attached.

        Raises:
            SlackApiError: If the paginated request to the Slack API failed.
            StopIteration: When no further cursor is available.
        """
        self._iteration += 1
        # The first step of the iteration yields the page we already hold.
        if self._iteration == 1:
            return self
        if not _next_cursor_is_present(self.data):  # skipcq: PYL-R1705
            raise StopIteration
        cursor = self.data["response_metadata"]["next_cursor"]
        params = self.req_args.get("params", {})
        if params is None:
            params = {}
        params["cursor"] = cursor
        self.req_args["params"] = params
        # This call performs the next-page request synchronously.
        next_page = self._client._request_for_pagination(  # skipcq: PYL-W0212
            api_url=self.api_url, req_args=self.req_args
        )
        self.data = next_page["data"]
        self.headers = next_page["headers"]
        self.status_code = next_page["status_code"]
        return self.validate()

    def get(self, key, default=None):
        """Dict-style lookup with a default, e.g. ``response.get("ok", False)``.

        Returns:
            The value from the response data, or the specified default.
        """
        return self.data.get(key, default)

    def validate(self):
        """Return self when the Slack API call succeeded; raise otherwise.

        Returns:
            (SlackResponse) self

        Raises:
            SlackApiError: The request to the Slack API failed.
        """
        if self._logger.level <= logging.DEBUG:
            self._logger.debug(
                f"Received the following response - "
                f"status: {self.status_code}, "
                f"headers: {dict(self.headers)}, "
                f"body: {self.data}"
            )
        successful = (
            self.status_code == 200 and self.data and self.data.get("ok", False)
        )
        if successful:
            return self
        raise e.SlackApiError(
            message="The request to the Slack API failed.", response=self
        )
| {
"repo_name": "slackapi/python-slackclient",
"path": "slack_sdk/web/slack_response.py",
"copies": "1",
"size": "5741",
"license": "mit",
"hash": -2014196673231019300,
"line_mean": 31.0726256983,
"line_max": 84,
"alpha_frac": 0.5800383209,
"autogenerated": false,
"ratio": 4.488663017982799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5568701338882799,
"avg_score": null,
"num_lines": null
} |
"""A Python module for interacting with Slack's RTM API."""
import inspect
import json
import logging
import time
from concurrent.futures.thread import ThreadPoolExecutor
from logging import Logger
from queue import Queue, Empty
from ssl import SSLContext
from threading import Lock, Event
from typing import Optional, Callable, List, Union
from slack_sdk.errors import SlackApiError, SlackClientError
from slack_sdk.proxy_env_variable_loader import load_http_proxy_from_env
from slack_sdk.socket_mode.builtin.connection import Connection, ConnectionState
from slack_sdk.socket_mode.interval_runner import IntervalRunner
from slack_sdk.web import WebClient
class RTMClient:
    """A client for Slack's RTM (Real Time Messaging) API over WebSocket.

    Raw WebSocket messages are placed on an internal queue and dispatched
    to registered message listeners on a thread pool. Use the #on decorator
    to register listeners and #connect / #start to open a connection. The
    background runners (IntervalRunner) drive the session read loop and a
    periodic session health monitor.
    """
    token: Optional[str]
    bot_id: Optional[str]
    default_auto_reconnect_enabled: bool
    auto_reconnect_enabled: bool
    ssl: Optional[SSLContext]
    proxy: str
    timeout: int
    base_url: str
    ping_interval: int
    logger: Logger
    web_client: WebClient
    # Current WebSocket session and its state/termination flag.
    current_session: Optional[Connection]
    current_session_state: Optional[ConnectionState]
    wss_uri: Optional[str]
    # Incoming-message pipeline.
    message_queue: Queue
    message_listeners: List[Callable[["RTMClient", dict], None]]
    message_processor: IntervalRunner
    message_workers: ThreadPoolExecutor
    closed: bool
    connect_operation_lock: Lock
    # Low-level listeners for raw WebSocket events.
    on_message_listeners: List[Callable[[str], None]]
    on_error_listeners: List[Callable[[Exception], None]]
    on_close_listeners: List[Callable[[int, Optional[str]], None]]
    def __init__(
        self,
        *,
        token: Optional[str] = None,
        web_client: Optional[WebClient] = None,
        auto_reconnect_enabled: bool = True,
        ssl: Optional[SSLContext] = None,
        proxy: Optional[str] = None,
        timeout: int = 30,
        base_url: str = WebClient.BASE_URL,
        headers: Optional[dict] = None,
        ping_interval: int = 5,
        concurrency: int = 10,
        logger: Optional[logging.Logger] = None,
        on_message_listeners: Optional[List[Callable[[str], None]]] = None,
        on_error_listeners: Optional[List[Callable[[Exception], None]]] = None,
        on_close_listeners: Optional[List[Callable[[int, Optional[str]], None]]] = None,
        trace_enabled: bool = False,
        all_message_trace_enabled: bool = False,
        ping_pong_trace_enabled: bool = False,
    ):
        """Initializes this RTM client.
        Args:
            token: A Slack token (surrounding whitespace is stripped).
            web_client: An existing WebClient to reuse; built here otherwise.
            auto_reconnect_enabled: Reconnect automatically on disconnect.
            ssl: SSLContext for the connections.
            proxy: Proxy URL; falls back to proxy env variables when empty.
            timeout: HTTP request timeout in seconds (used by the WebClient).
            base_url: Base URL for Slack Web API calls.
            headers: Extra HTTP headers for the WebClient.
            ping_interval: Seconds between pings / session-monitor runs.
            concurrency: Max worker threads for running message listeners.
            logger: Custom logger; defaults to this module's logger.
            on_message_listeners: Raw-text WebSocket message listeners.
            on_error_listeners: WebSocket error listeners.
            on_close_listeners: WebSocket close listeners.
            trace_enabled: Enables trace-level connection logging.
            all_message_trace_enabled: Traces every message (verbose).
            ping_pong_trace_enabled: Traces ping/pong messages.
        """
        self.token = token.strip() if token is not None else None
        self.bot_id = None
        self.default_auto_reconnect_enabled = auto_reconnect_enabled
        # You may want to temporarily turn off auto_reconnect as necessary
        self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
        self.ssl = ssl
        self.proxy = proxy
        self.timeout = timeout
        self.base_url = base_url
        self.headers = headers
        self.ping_interval = ping_interval
        self.logger = logger or logging.getLogger(__name__)
        # Fall back to proxy settings from environment variables when not given.
        if self.proxy is None or len(self.proxy.strip()) == 0:
            env_variable = load_http_proxy_from_env(self.logger)
            if env_variable is not None:
                self.proxy = env_variable
        self.web_client = web_client or WebClient(
            token=self.token,
            base_url=self.base_url,
            timeout=self.timeout,
            ssl=self.ssl,
            proxy=self.proxy,
            headers=self.headers,
            logger=logger,
        )
        self.on_message_listeners = on_message_listeners or []
        self.on_error_listeners = on_error_listeners or []
        self.on_close_listeners = on_close_listeners or []
        self.trace_enabled = trace_enabled
        self.all_message_trace_enabled = all_message_trace_enabled
        self.ping_pong_trace_enabled = ping_pong_trace_enabled
        self.message_queue = Queue()
        # Built-in listener: Slack sends a "goodbye" event just before the
        # server closes the connection, so proactively move to a new endpoint.
        def goodbye_listener(_self, event: dict):
            if event.get("type") == "goodbye":
                message = "Got a goodbye message. Reconnecting to the server ..."
                self.logger.info(message)
                self.connect_to_new_endpoint(force=True)
        self.message_listeners = [goodbye_listener]
        self.socket_mode_request_listeners = []
        self.current_session = None
        self.current_session_state = ConnectionState()
        # Background runner that keeps draining the active session
        # (IntervalRunner presumably re-invokes the callable at the given
        # interval in seconds — confirm against IntervalRunner).
        self.current_session_runner = IntervalRunner(
            self._run_current_session, 0.1
        ).start()
        self.wss_uri = None
        # Health monitor is created here but only started on first connect().
        self.current_app_monitor_started = False
        self.current_app_monitor = IntervalRunner(
            self._monitor_current_session,
            self.ping_interval,
        )
        self.closed = False
        self.connect_operation_lock = Lock()
        # Continuously pulls queued messages and dispatches them to listeners.
        self.message_processor = IntervalRunner(self.process_messages, 0.001).start()
        self.message_workers = ThreadPoolExecutor(max_workers=concurrency)
    # --------------------------------------------------------------
    # Decorator to register listeners
    # --------------------------------------------------------------
    def on(self, event_type: str) -> Callable:
        """Registers a new event listener.
        Args:
            event_type: str representing an event's type (e.g., message, reaction_added)
        """
        def __call__(*args, **kwargs):
            func = args[0]
            if func is not None:
                if isinstance(func, Callable):
                    name = (
                        func.__name__
                        if hasattr(func, "__name__")
                        else f"{func.__class__.__module__}.{func.__class__.__name__}"
                    )
                    # Listeners must take exactly (client, event).
                    inspect_result: inspect.FullArgSpec = inspect.getfullargspec(func)
                    if inspect_result is not None and len(inspect_result.args) != 2:
                        actual_args = ", ".join(inspect_result.args)
                        error = f"The listener '{name}' must accept two args: client, event (actual: {actual_args})"
                        raise SlackClientError(error)
                    # Wrap the user function so it only fires for the matching
                    # event type (or "*" for all events).
                    def new_message_listener(_self, event: dict):
                        actual_event_type = event.get("type")
                        if event.get("bot_id") == self.bot_id:
                            # Skip the events generated by this bot user
                            return
                        # https://github.com/slackapi/python-slack-sdk/issues/533
                        if event_type == "*" or (
                            actual_event_type is not None
                            and actual_event_type == event_type
                        ):
                            func(_self, event)
                    self.message_listeners.append(new_message_listener)
                else:
                    error = f"The listener '{func}' is not a Callable (actual: {type(func).__name__})"
                    raise SlackClientError(error)
            # Not to cause modification to the decorated method
            return func
        return __call__
    # --------------------------------------------------------------
    # Connections
    # --------------------------------------------------------------
    def is_connected(self) -> bool:
        """Returns True if this client is connected."""
        return self.current_session is not None and self.current_session.is_active()
    def issue_new_wss_url(self) -> str:
        """Acquires a new WSS URL using rtm.connect API method"""
        try:
            api_response = self.web_client.rtm_connect()
            return api_response["url"]
        except SlackApiError as e:
            if e.response["error"] == "ratelimited":
                delay = int(e.response.headers.get("Retry-After", "30")) # Tier1
                self.logger.info(f"Rate limited. Retrying in {delay} seconds...")
                time.sleep(delay)
                # Retry to issue a new WSS URL
                return self.issue_new_wss_url()
            else:
                # other errors
                self.logger.error(f"Failed to retrieve WSS URL: {e}")
                raise e
    def connect_to_new_endpoint(self, force: bool = False):
        """Acquires a new WSS URL and tries to connect to the endpoint.
        Args:
            force: Reconnect even when the current session is still active.
        """
        # Serialized under a lock so concurrent reconnect triggers (goodbye
        # listener, health monitor, close listener) don't race each other.
        with self.connect_operation_lock:
            if force or not self.is_connected():
                self.logger.info("Connecting to a new endpoint...")
                self.wss_uri = self.issue_new_wss_url()
                self.connect()
                self.logger.info("Connected to a new endpoint...")
    def connect(self):
        """Starts talking to the RTM server through a WebSocket connection"""
        # bot_id is used by listeners to skip this bot's own events.
        if self.bot_id is None:
            self.bot_id = self.web_client.auth_test()["bot_id"]
        old_session: Optional[Connection] = self.current_session
        old_current_session_state: ConnectionState = self.current_session_state
        if self.wss_uri is None:
            self.wss_uri = self.issue_new_wss_url()
        current_session = Connection(
            url=self.wss_uri,
            logger=self.logger,
            ping_interval=self.ping_interval,
            trace_enabled=self.trace_enabled,
            all_message_trace_enabled=self.all_message_trace_enabled,
            ping_pong_trace_enabled=self.ping_pong_trace_enabled,
            receive_buffer_size=1024,
            proxy=self.proxy,
            on_message_listener=self.run_all_message_listeners,
            on_error_listener=self.run_all_error_listeners,
            on_close_listener=self.run_all_close_listeners,
            connection_type_name="RTM",
        )
        current_session.connect()
        # Tear down the previous session only after the new one is up.
        if old_current_session_state is not None:
            old_current_session_state.terminated = True
        if old_session is not None:
            old_session.close()
        self.current_session = current_session
        self.current_session_state = ConnectionState()
        self.auto_reconnect_enabled = self.default_auto_reconnect_enabled
        if not self.current_app_monitor_started:
            self.current_app_monitor_started = True
            self.current_app_monitor.start()
        self.logger.info(
            f"A new session has been established (session id: {self.session_id()})"
        )
    def disconnect(self):
        """Disconnects the current session.
        NOTE(review): assumes a session exists — raises AttributeError when
        current_session is still None (i.e. connect() was never called).
        """
        self.current_session.disconnect()
    def close(self) -> None:
        """
        Closes this instance and cleans up underlying resources.
        After calling this method, this instance is no longer usable.
        """
        self.closed = True
        self.disconnect()
        self.current_session.close()
    def start(self) -> None:
        """Establishes an RTM connection and blocks the current thread."""
        self.connect()
        # Block forever; the background runners do the actual work.
        Event().wait()
    def send(self, payload: Union[dict, str]) -> None:
        """Sends a payload over the active WebSocket connection.
        Args:
            payload: A dict (JSON-encoded before sending) or a raw string.
        Raises:
            SlackClientError: When the client is not connected.
        """
        if payload is None:
            return
        if self.current_session is None or not self.current_session.is_active():
            raise SlackClientError(
                "The RTM client is not connected to the Slack servers"
            )
        if isinstance(payload, str):
            self.current_session.send(payload)
        else:
            self.current_session.send(json.dumps(payload))
    # --------------------------------------------------------------
    # WS Message Processor
    # --------------------------------------------------------------
    def enqueue_message(self, message: str):
        """Puts a raw message on the internal queue for later dispatch."""
        self.message_queue.put(message)
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(
                f"A new message enqueued (current queue size: {self.message_queue.qsize()})"
            )
    def process_message(self):
        """Dequeues one raw message and submits it to the worker pool."""
        try:
            raw_message = self.message_queue.get(timeout=1)
            if self.logger.level <= logging.DEBUG:
                self.logger.debug(
                    f"A message dequeued (current queue size: {self.message_queue.qsize()})"
                )
            if raw_message is not None:
                # Non-JSON frames are dispatched as an empty dict.
                message: dict = {}
                if raw_message.startswith("{"):
                    message = json.loads(raw_message)
                # Closure captures this particular message for the worker thread.
                def _run_message_listeners():
                    self.run_message_listeners(message)
                self.message_workers.submit(_run_message_listeners)
        except Empty:
            # No message arrived within the 1-second timeout; caller loops.
            pass
    def process_messages(self) -> None:
        """Loops over process_message until the client is closed."""
        while not self.closed:
            try:
                self.process_message()
            except Exception as e:
                self.logger.exception(f"Failed to process a message: {e}")
    def run_message_listeners(self, message: dict) -> None:
        """Runs every registered message listener against one parsed message.
        Listener exceptions are logged and never propagate to the caller.
        """
        # NOTE(review): this local shadows the builtin `type` inside this method.
        type = message.get("type")
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(f"Message processing started (type: {type})")
        try:
            for listener in self.message_listeners:
                try:
                    listener(self, message)
                except Exception as e:
                    self.logger.exception(f"Failed to run a message listener: {e}")
        except Exception as e:
            self.logger.exception(f"Failed to run message listeners: {e}")
        finally:
            if self.logger.level <= logging.DEBUG:
                self.logger.debug(f"Message processing completed (type: {type})")
    # --------------------------------------------------------------
    # Internals
    # --------------------------------------------------------------
    def session_id(self) -> Optional[str]:
        """Returns the current session's id, or None when not connected."""
        if self.current_session is not None:
            return self.current_session.session_id
        return None
    def run_all_message_listeners(self, message: str):
        """Enqueues the raw message and notifies raw-message listeners."""
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(f"on_message invoked: (message: {message})")
        self.enqueue_message(message)
        for listener in self.on_message_listeners:
            listener(message)
    def run_all_error_listeners(self, error: Exception):
        """Logs the error and notifies all registered error listeners."""
        self.logger.exception(
            f"on_error invoked (session id: {self.session_id()}, "
            f"error: {type(error).__name__}, message: {error})"
        )
        for listener in self.on_error_listeners:
            listener(error)
    def run_all_close_listeners(self, code: int, reason: Optional[str] = None):
        """Handles a connection close: reconnects (if enabled) and notifies listeners."""
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(f"on_close invoked (session id: {self.session_id()})")
        if self.auto_reconnect_enabled:
            self.logger.info(
                "Received CLOSE event. Going to reconnect... "
                f"(session id: {self.session_id()})"
            )
            self.connect_to_new_endpoint()
        for listener in self.on_close_listeners:
            listener(code, reason)
    def _run_current_session(self):
        """Runs the active session's receive loop until it completes.
        Invoked repeatedly by current_session_runner; no-op when there is
        no active session.
        """
        if self.current_session is not None and self.current_session.is_active():
            session_id = self.session_id()
            try:
                self.logger.info(
                    "Starting to receive messages from a new connection"
                    f" (session id: {session_id})"
                )
                self.current_session_state.terminated = False
                self.current_session.run_until_completion(self.current_session_state)
                self.logger.info(
                    "Stopped receiving messages from a connection"
                    f" (session id: {session_id})"
                )
            except Exception as e:
                self.logger.exception(
                    "Failed to start or stop the current session"
                    f" (session id: {session_id}, error: {e})"
                )
    def _monitor_current_session(self):
        """Periodically checks session health and reconnects when needed.
        NOTE(review): check_state() is called before any None check, so when
        current_session is None this raises AttributeError, which is caught
        and logged by the except block below rather than handled explicitly.
        """
        if self.current_app_monitor_started:
            try:
                self.current_session.check_state()
                if self.auto_reconnect_enabled and (
                    self.current_session is None or not self.current_session.is_active()
                ):
                    self.logger.info(
                        "The session seems to be already closed. Going to reconnect... "
                        f"(session id: {self.session_id()})"
                    )
                    self.connect_to_new_endpoint()
            except Exception as e:
                self.logger.error(
                    "Failed to check the current session or reconnect to the server "
                    f"(session id: {self.session_id()}, error: {type(e).__name__}, message: {e})"
                )
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/rtm_v2/__init__.py",
"copies": "1",
"size": "16519",
"license": "mit",
"hash": 8104512609637432000,
"line_mean": 38.519138756,
"line_max": 116,
"alpha_frac": 0.5560869302,
"autogenerated": false,
"ratio": 4.393351063829787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007121663029436743,
"num_lines": 418
} |
"""A Python module for interacting with Slack's RTM API."""
import asyncio
import collections
import inspect
import logging
import os
import random
import signal
from asyncio import Future
from ssl import SSLContext
from threading import current_thread, main_thread
from typing import Any, Union, Sequence
from typing import Optional, Callable, DefaultDict
import aiohttp
import slack_sdk.errors as client_err
from slack_sdk.aiohttp_version_checker import validate_aiohttp_version
from slack_sdk.web.legacy_client import LegacyWebClient as WebClient
validate_aiohttp_version(aiohttp.__version__)
class RTMClient(object): # skipcq: PYL-R0205
"""An RTMClient allows apps to communicate with the Slack Platform's RTM API.
The event-driven architecture of this client allows you to simply
link callbacks to their corresponding events. When an event occurs
this client executes your callback while passing along any
information it receives.
Attributes:
token (str): A string specifying an xoxp or xoxb token.
run_async (bool): A boolean specifying if the client should
be run in async mode. Default is False.
auto_reconnect (bool): When true the client will automatically
reconnect when (not manually) disconnected. Default is True.
ssl (SSLContext): To use SSL support, pass an SSLContext object here.
Default is None.
proxy (str): To use proxy support, pass the string of the proxy server.
e.g. "http://proxy.com"
Authentication credentials can be passed in proxy URL.
e.g. "http://user:pass@some.proxy.com"
Default is None.
timeout (int): The amount of seconds the session should wait before timing out.
Default is 30.
base_url (str): The base url for all HTTP requests.
Note: This is only used in the WebClient.
Default is "https://www.slack.com/api/".
connect_method (str): An string specifying if the client
will connect with `rtm.connect` or `rtm.start`.
Default is `rtm.connect`.
ping_interval (int): automatically send "ping" command every
specified period of seconds. If set to 0, do not send automatically.
Default is 30.
loop (AbstractEventLoop): An event loop provided by asyncio.
If None is specified we attempt to use the current loop
with `get_event_loop`. Default is None.
Methods:
ping: Sends a ping message over the websocket to Slack.
typing: Sends a typing indicator to the specified channel.
on: Stores and links callbacks to websocket and Slack events.
run_on: Decorator that stores and links callbacks to websocket and Slack events.
start: Starts an RTM Session with Slack.
stop: Closes the websocket connection and ensures it won't reconnect.
Example:
```python
import os
from slack import RTMClient
@RTMClient.run_on(event="message")
def say_hello(**payload):
data = payload['data']
web_client = payload['web_client']
if 'Hello' in data['text']:
channel_id = data['channel']
thread_ts = data['ts']
user = data['user']
web_client.chat_postMessage(
channel=channel_id,
text=f"Hi <@{user}>!",
thread_ts=thread_ts
)
slack_token = os.environ["SLACK_API_TOKEN"]
rtm_client = RTMClient(token=slack_token)
rtm_client.start()
```
Note:
The initial state returned when establishing an RTM connection will
be available as the data in payload for the 'open' event. This data is not and
will not be stored on the RTM Client.
Any attributes or methods prefixed with _underscores are
intended to be "private" internal use only. They may be changed or
removed at anytime.
"""
_callbacks: DefaultDict = collections.defaultdict(list)
def __init__(
self,
*,
token: str,
run_async: Optional[bool] = False,
auto_reconnect: Optional[bool] = True,
ssl: Optional[SSLContext] = None,
proxy: Optional[str] = None,
timeout: Optional[int] = 30,
base_url: Optional[str] = WebClient.BASE_URL,
connect_method: Optional[str] = None,
ping_interval: Optional[int] = 30,
loop: Optional[asyncio.AbstractEventLoop] = None,
headers: Optional[dict] = {},
):
self.token = token.strip()
self.run_async = run_async
self.auto_reconnect = auto_reconnect
self.ssl = ssl
self.proxy = proxy
self.timeout = timeout
self.base_url = base_url
self.connect_method = connect_method
self.ping_interval = ping_interval
self.headers = headers
self._event_loop = loop or asyncio.get_event_loop()
self._web_client = None
self._websocket = None
self._session = None
self._logger = logging.getLogger(__name__)
self._last_message_id = 0
self._connection_attempts = 0
self._stopped = False
self._web_client = WebClient(
token=self.token,
base_url=self.base_url,
timeout=self.timeout,
ssl=self.ssl,
proxy=self.proxy,
run_async=self.run_async,
loop=self._event_loop,
session=self._session,
headers=self.headers,
)
@staticmethod
def run_on(*, event: str):
"""A decorator to store and link a callback to an event."""
def decorator(callback):
RTMClient.on(event=event, callback=callback)
return callback
return decorator
    @classmethod
    def on(cls, *, event: str, callback: Callable):
        """Stores and links the callback(s) to the event.
        Args:
            event (str): A string that specifies a Slack or websocket event.
                e.g. 'channel_joined' or 'open'
            callback (Callable): Any object or a list of objects that can be called.
                e.g. <function say_hello at 0x101234567> or
                [<function say_hello at 0x10123>,<function say_bye at 0x10456>]
        Raises:
            SlackClientError: The specified callback is not callable.
            SlackClientError: The callback must accept keyword arguments (**kwargs).
        """
        if isinstance(callback, list):
            for cb in callback:
                cls._validate_callback(cb)
            # NOTE(review): de-duplicating via set() discards the registration
            # order of the callbacks for this event.
            previous_callbacks = cls._callbacks[event]
            cls._callbacks[event] = list(set(previous_callbacks + callback))
        else:
            cls._validate_callback(callback)
            cls._callbacks[event].append(callback)
    def start(self) -> Union[asyncio.Future, Any]:
        """Starts an RTM Session with Slack.
        Makes an authenticated call to Slack's RTM API to retrieve
        a websocket URL and then connects to the message server.
        As events stream-in we run any associated callbacks stored
        on the client.
        If 'auto_reconnect' is specified we
        retrieve a new url and reconnect any time the connection
        is lost unintentionally or an exception is thrown.
        Returns:
            The connection Future when run_async is True; otherwise the
            result of running that future to completion on the event loop.
        Raises:
            SlackApiError: Unable to retrieve RTM URL from Slack.
        """
        # Not yet implemented: Add Windows support for graceful shutdowns.
        # Signal handlers can only be installed from the main thread.
        if os.name != "nt" and current_thread() == main_thread():
            signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
            for s in signals:
                self._event_loop.add_signal_handler(s, self.stop)
        future: Future[Any] = asyncio.ensure_future(
            self._connect_and_read(), loop=self._event_loop
        )
        # In async mode the caller drives the loop; otherwise block here.
        if self.run_async:
            return future
        return self._event_loop.run_until_complete(future)
    def stop(self):
        """Closes the websocket connection and ensures it won't reconnect.
        If your application outputs the following errors,
        call #async_stop() instead and await for the completion on your application side.
        asyncio/base_events.py:641: RuntimeWarning:
        coroutine 'ClientWebSocketResponse.close' was never awaited self._ready.clear()
        """
        self._logger.debug("The Slack RTMClient is shutting down.")
        # Set the flag first so the connect/read loops stop reconnecting.
        self._stopped = True
        self._close_websocket()
    async def async_stop(self):
        """Closes the websocket connection and ensures it won't reconnect."""
        self._logger.debug("The Slack RTMClient is shutting down.")
        # _close_websocket presumably returns the futures still pending for
        # the close handshake; await them all before flagging the client as
        # stopped. TODO confirm against _close_websocket (not in this view).
        remaining_futures = self._close_websocket()
        for future in remaining_futures:
            await future
        self._stopped = True
    def send_over_websocket(self, *, payload: dict):
        """Sends a message to Slack over the WebSocket connection.
        Note:
            The RTM API only supports posting simple messages formatted using
            our default message formatting mode. It does not support
            attachments or other message formatting modes. For this reason
            we recommend users send messages via the Web API methods.
            e.g. web_client.chat_postMessage()
            If the message "id" is not specified in the payload, it'll be added.
        Args:
            payload (dict): The message to send over the websocket.
            e.g.
            {
                "id": 1,
                "type": "typing",
                "channel": "C024BE91L"
            }
        Returns:
            A Future wrapping the coroutine that performs the send.
        Raises:
            SlackClientNotConnectedError: Websocket connection is closed.
        """
        # Schedule the send on the client's event loop; callers may await the
        # returned future or fire-and-forget it.
        return asyncio.ensure_future(self._send_json(payload), loop=self._event_loop)
async def _send_json(self, payload):
if self._websocket is None or self._event_loop is None:
raise client_err.SlackClientNotConnectedError(
"Websocket connection is closed."
)
if "id" not in payload:
payload["id"] = self._next_msg_id()
return await self._websocket.send_json(payload)
async def ping(self):
"""Sends a ping message over the websocket to Slack.
Not all web browsers support the WebSocket ping spec,
so the RTM protocol also supports ping/pong messages.
Raises:
SlackClientNotConnectedError: Websocket connection is closed.
"""
payload = {"id": self._next_msg_id(), "type": "ping"}
await self._send_json(payload=payload)
async def typing(self, *, channel: str):
"""Sends a typing indicator to the specified channel.
This indicates that this app is currently
writing a message to send to a channel.
Args:
channel (str): The channel id. e.g. 'C024BE91L'
Raises:
SlackClientNotConnectedError: Websocket connection is closed.
"""
payload = {"id": self._next_msg_id(), "type": "typing", "channel": channel}
await self._send_json(payload=payload)
@staticmethod
def _validate_callback(callback):
"""Checks if the specified callback is callable and accepts a kwargs param.
Args:
callback (obj): Any object or a list of objects that can be called.
e.g. <function say_hello at 0x101234567>
Raises:
SlackClientError: The specified callback is not callable.
SlackClientError: The callback must accept keyword arguments (**kwargs).
"""
cb_name = callback.__name__ if hasattr(callback, "__name__") else callback
if not callable(callback):
msg = "The specified callback '{}' is not callable.".format(cb_name)
raise client_err.SlackClientError(msg)
callback_params = inspect.signature(callback).parameters.values()
if not any(
param for param in callback_params if param.kind == param.VAR_KEYWORD
):
msg = "The callback '{}' must accept keyword arguments (**kwargs).".format(
cb_name
)
raise client_err.SlackClientError(msg)
def _next_msg_id(self):
"""Retrieves the next message id.
When sending messages to Slack every event should
have a unique (for that connection) positive integer ID.
Returns:
An integer representing the message id. e.g. 98
"""
self._last_message_id += 1
return self._last_message_id
    async def _connect_and_read(self):
        """Retrieves the WS url and connects to Slack's RTM API.
        Makes an authenticated call to Slack's Web API to retrieve
        a websocket URL. Then connects to the message server and
        reads event messages as they come in.
        If 'auto_reconnect' is specified we
        retrieve a new url and reconnect any time the connection
        is lost unintentionally or an exception is thrown.
        Raises:
            SlackApiError: Unable to retrieve RTM URL from Slack.
            websockets.exceptions: Errors thrown by the 'websockets' library.
        """
        while not self._stopped:
            try:
                self._connection_attempts += 1
                async with aiohttp.ClientSession(
                    timeout=aiohttp.ClientTimeout(total=self.timeout)
                ) as session:
                    self._session = session
                    url, data = await self._retrieve_websocket_info()
                    async with session.ws_connect(
                        url,
                        heartbeat=self.ping_interval,
                        ssl=self.ssl,
                        proxy=self.proxy,
                    ) as websocket:
                        self._logger.debug("The Websocket connection has been opened.")
                        self._websocket = websocket
                        # The initial RTM state is only delivered via the
                        # 'open' event; it is not stored on the client.
                        await self._dispatch_event(event="open", data=data)
                        await self._read_messages()
                # The websocket has been disconnected, or self._stopped is True
                if not self._stopped and not self.auto_reconnect:
                    self._logger.warning(
                        "Not reconnecting the Websocket because auto_reconnect is False"
                    )
                    return
                # No need to wait exponentially here, since the connection was
                # established OK, but timed out, or was closed remotely
            except (
                client_err.SlackClientNotConnectedError,
                client_err.SlackApiError,
                # Not yet implemented: Catch websocket exceptions thrown by aiohttp.
            ) as exception:
                await self._dispatch_event(event="error", data=exception)
                error_code = (
                    exception.response.get("error", None)
                    if hasattr(exception, "response")
                    else None
                )
                if (
                    self.auto_reconnect
                    and not self._stopped
                    and error_code != "invalid_auth"  # "invalid_auth" is unrecoverable
                ):
                    # Back off before retrying the whole connect loop.
                    await self._wait_exponentially(exception)
                    continue
                self._logger.exception(
                    "The Websocket encountered an error. Closing the connection..."
                )
                self._close_websocket()
                raise
    async def _read_messages(self):
        """Process messages received on the WebSocket connection.
        Returns when the client is stopped or the websocket closes; each
        received frame is dispatched to the matching event callbacks.
        """
        while not self._stopped and self._websocket is not None:
            try:
                # Wait for a message to be received, but timeout after a second so that
                # we can check if the socket has been closed, or if self._stopped is
                # True
                message = await self._websocket.receive(timeout=1)
            except asyncio.TimeoutError:
                if not self._websocket.closed:
                    # We didn't receive a message within the timeout interval, but
                    # aiohttp hasn't closed the socket, so ping responses must still be
                    # returning
                    continue
                self._logger.warning(
                    "Websocket was closed (%s).",
                    self._websocket.close_code if self._websocket else "",
                )
                await self._dispatch_event(
                    event="error",
                    data=self._websocket.exception() if self._websocket else "",
                )
                self._websocket = None
                await self._dispatch_event(event="close")
                return
            if message.type == aiohttp.WSMsgType.TEXT:
                try:
                    payload = message.json()
                    # Slack events carry their name in "type"; unknown frames
                    # are dispatched under "Unknown".
                    event = payload.pop("type", "Unknown")
                    await self._dispatch_event(event, data=payload)
                except Exception as err:  # skipcq: PYL-W0703
                    data = message.data if message else message
                    self._logger.info(
                        f"Caught a raised exception ({err}) while dispatching a TEXT message ({data})"
                    )
                    # Raised exceptions here happen in users' code and were just unhandled.
                    # As they're not intended for closing current WebSocket connection,
                    # this exception should not be propagated to higher level (#_connect_and_read()).
                    continue
            elif message.type == aiohttp.WSMsgType.ERROR:
                self._logger.error("Received an error on the websocket: %r", message)
                await self._dispatch_event(event="error", data=message)
            elif message.type in (
                aiohttp.WSMsgType.CLOSE,
                aiohttp.WSMsgType.CLOSING,
                aiohttp.WSMsgType.CLOSED,
            ):
                self._logger.warning("Websocket was closed.")
                self._websocket = None
                await self._dispatch_event(event="close")
            else:
                self._logger.debug("Received unhandled message type: %r", message)
async def _dispatch_event(self, event, data=None):
    """Dispatches the event and executes any associated callbacks.

    Note: To prevent the app from crashing due to callback errors. We
    catch all exceptions and send all data to the logger.
    NOTE(review): the except-clause below logs and then re-raises, so
    callback errors do propagate to the caller — confirm whether the
    note above still reflects the intended contract.

    Args:
        event (str): The type of event. e.g. 'bot_added'
        data (dict): The data Slack sent. e.g.
        {
            "type": "bot_added",
            "bot": {
                "id": "B024BE7LH",
                "app_id": "A4H1JB4AZ",
                "name": "hugbot"
            }
        }
    """
    if self._logger.level <= logging.DEBUG:
        self._logger.debug("Received an event: '%s' - %s", event, data)
    for callback in self._callbacks[event]:
        # Logged once per callback; the count shown is the total number of
        # callbacks registered for this event.
        self._logger.debug(
            "Running %s callbacks for event: '%s'",
            len(self._callbacks[event]),
            event,
        )
        try:
            if self._stopped and event not in ["close", "error"]:
                # Don't run callbacks if client was stopped unless they're
                # close/error callbacks.
                break
            if inspect.iscoroutinefunction(callback):
                await callback(
                    rtm_client=self, web_client=self._web_client, data=data
                )
            else:
                if self.run_async is True:
                    # A sync callback would block the event loop when the
                    # client itself runs async, so reject it outright.
                    raise client_err.SlackRequestError(
                        f'The callback "{callback.__name__}" is NOT a coroutine. '
                        "Running such with run_async=True is unsupported. "
                        "Consider adding async/await to the method "
                        "or going with run_async=False if your app is not really non-blocking."
                    )
                payload = {
                    "rtm_client": self,
                    "web_client": self._web_client,
                    "data": data,
                }
                callback(**payload)
        except Exception as err:
            name = callback.__name__
            module = callback.__module__
            msg = f"When calling '#{name}()' in the '{module}' module the following error was raised: {err}"
            self._logger.error(msg)
            raise
async def _retrieve_websocket_info(self):
    """Retrieves the WebSocket info from Slack.

    Returns:
        A tuple of websocket information.
        e.g.
        (
            "wss://...",
            {
                "self": {"id": "U01234ABC","name": "robotoverlord"},
                "team": {
                    "domain": "exampledomain",
                    "id": "T123450FP",
                    "name": "ExampleName"
                }
            }
        )

    Raises:
        SlackApiError: Unable to retrieve RTM URL from Slack.
    """
    if self._web_client is None:
        # Lazily build an async-capable WebClient bound to this client's
        # loop/session the first time websocket info is requested.
        self._web_client = WebClient(
            token=self.token,
            base_url=self.base_url,
            timeout=self.timeout,
            ssl=self.ssl,
            proxy=self.proxy,
            run_async=True,
            loop=self._event_loop,
            session=self._session,
            headers=self.headers,
        )
    self._logger.debug("Retrieving websocket info.")
    # rtm.start returns a richer workspace snapshot; rtm.connect is lighter.
    use_rtm_start = self.connect_method in ["rtm.start", "rtm_start"]
    if self.run_async:
        if use_rtm_start:
            resp = await self._web_client.rtm_start()
        else:
            resp = await self._web_client.rtm_connect()
    else:
        # NOTE(review): in the sync branch the call is not awaited —
        # presumably a pre-existing sync web client returns a SlackResponse
        # directly; confirm against the WebClient's sync code path.
        if use_rtm_start:
            resp = self._web_client.rtm_start()
        else:
            resp = self._web_client.rtm_connect()
    url = resp.get("url")
    if url is None:
        msg = "Unable to retrieve RTM URL from Slack."
        raise client_err.SlackApiError(message=msg, response=resp)
    return url, resp.data
async def _wait_exponentially(self, exception, max_wait_time=300):
"""Wait exponentially longer for each connection attempt.
Calculate the number of seconds to wait and then add
a random number of milliseconds to avoid coincidental
synchronized client retries. Wait up to the maximum amount
of wait time specified via 'max_wait_time'. However,
if Slack returned how long to wait use that.
"""
if hasattr(exception, "response"):
wait_time = exception.response.get("headers", {}).get(
"Retry-After",
min((2 ** self._connection_attempts) + random.random(), max_wait_time),
)
self._logger.debug("Waiting %s seconds before reconnecting.", wait_time)
await asyncio.sleep(float(wait_time))
def _close_websocket(self) -> Sequence[Future]:
    """Schedule teardown of the websocket connection.

    Returns the futures created for the websocket close call and the
    'close' event dispatch so the caller can await/gather them.
    """
    pending = []
    closer = getattr(self._websocket, "close", None)
    if callable(closer):
        close_future = asyncio.ensure_future(  # skipcq: PYL-E1102
            closer(), loop=self._event_loop  # skipcq: PYL-E1102
        )  # skipcq: PYL-E1102
        pending.append(close_future)
    self._websocket = None
    dispatch_future = asyncio.ensure_future(
        self._dispatch_event(event="close"), loop=self._event_loop
    )
    pending.append(dispatch_future)
    return pending
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/rtm/__init__.py",
"copies": "1",
"size": "24014",
"license": "mit",
"hash": 3115970183840664000,
"line_mean": 39.2919463087,
"line_max": 112,
"alpha_frac": 0.5632547681,
"autogenerated": false,
"ratio": 4.670167250097238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007872996592642973,
"num_lines": 596
} |
"""A Python module for interacting with Slack's Web API."""
import os
from io import IOBase
from typing import Union, List, Optional, Dict
import slack_sdk.errors as e
from slack_sdk.models.views import View
from .base_client import BaseClient, SlackResponse
from .internal_utils import _parse_web_class_objects, _update_call_participants
class WebClient(BaseClient):
"""A WebClient allows apps to communicate with the Slack Platform's Web API.
The Slack Web API is an interface for querying information from
and enacting change in a Slack workspace.
This client handles constructing and sending HTTP requests to Slack
as well as parsing any responses received into a `SlackResponse`.
Attributes:
token (str): A string specifying an xoxp or xoxb token.
base_url (str): A string representing the Slack API base URL.
Default is 'https://www.slack.com/api/'
timeout (int): The maximum number of seconds the client will wait
to connect and receive a response from Slack.
Default is 30 seconds.
Methods:
api_call: Constructs a request and executes the API call to Slack.
Example of recommended usage:
```python
import os
from slack_sdk import WebClient
client = WebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.chat_postMessage(
channel='#random',
text="Hello world!")
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Example manually creating an API request:
```python
import os
from slack_sdk import WebClient
client = WebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.api_call(
api_method='chat.postMessage',
json={'channel': '#random','text': "Hello world!"}
)
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Note:
Any attributes or methods prefixed with _underscores are
intended to be "private" internal use only. They may be changed or
removed at anytime.
"""
def admin_apps_approve(
    self, *, app_id: str = None, request_id: str = None, **kwargs
) -> SlackResponse:
    """Approve an app for installation on a workspace.

    Either app_id or request_id is required.
    These IDs can be obtained either directly via the app_requested event,
    or by the admin.apps.requests.list method.

    Args:
        app_id (str): The id of the app to approve. e.g. 'A12345'
        request_id (str): The id of the request to approve. e.g. 'Ar12345'

    Raises:
        SlackRequestError: If neither the `app_id` nor the `request_id`
            argument is specified. (If both are given, `app_id` takes
            precedence and `request_id` is ignored.)
    """
    # Doc fix: the previous docstring claimed an error was raised when
    # *both* IDs were supplied, but the code only rejects the neither case.
    if app_id:
        kwargs["app_id"] = app_id
    elif request_id:
        kwargs["request_id"] = request_id
    else:
        raise e.SlackRequestError(
            "The app_id or request_id argument must be specified."
        )
    return self.api_call("admin.apps.approve", json=kwargs)
def admin_apps_approved_list(self, **kwargs) -> SlackResponse:
    """Fetch the list of apps approved for an org or workspace."""
    response = self.api_call("admin.apps.approved.list", http_verb="GET", params=kwargs)
    return response
def admin_apps_requests_list(self, **kwargs) -> SlackResponse:
    """Fetch the pending app requests for a team/workspace."""
    response = self.api_call("admin.apps.requests.list", http_verb="GET", params=kwargs)
    return response
def admin_apps_restrict(self, **kwargs) -> SlackResponse:
    """Mark an app as restricted for installation on a workspace."""
    response = self.api_call("admin.apps.restrict", json=kwargs)
    return response
def admin_apps_restricted_list(self, **kwargs) -> SlackResponse:
    """Fetch the list of restricted apps for an org or workspace."""
    response = self.api_call(
        "admin.apps.restricted.list", http_verb="GET", params=kwargs
    )
    return response
def admin_conversations_create(
    self, *, is_private: bool, name: str, **kwargs
) -> SlackResponse:
    """Create a public or private channel-based conversation.

    Args:
        is_private (bool): When true, creates a private channel instead of a public channel
        name (str): Name of the public or private channel to create.
        org_wide (bool): When true, the channel will be available org-wide.
            Note: if the channel is not org_wide=true, you must specify a team_id for this channel
        team_id (str): The workspace to create the channel in.
            Note: this argument is required unless you set org_wide=true.
    """
    kwargs["is_private"] = is_private
    kwargs["name"] = name
    return self.api_call("admin.conversations.create", json=kwargs)
def admin_conversations_delete(self, *, channel_id: str, **kwargs) -> SlackResponse:
    """Delete a public or private channel.

    Args:
        channel_id (str): The channel to delete.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.delete", json=kwargs)
def admin_conversations_invite(
    self, *, channel_id: str, user_ids: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Invite a user to a public or private channel.

    Args:
        channel_id (str): The channel that the users will be invited to.
        user_ids (str or list): The users to invite.
    """
    kwargs["channel_id"] = channel_id
    kwargs["user_ids"] = ",".join(user_ids) if isinstance(user_ids, list) else user_ids
    # NOTE: the endpoint is unable to handle Content-Type: application/json as of Sep 3, 2020.
    return self.api_call("admin.conversations.invite", params=kwargs)
def admin_conversations_archive(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Archive a public or private channel.

    Args:
        channel_id (str): The channel to archive.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.archive", json=kwargs)
def admin_conversations_unarchive(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Unarchive a public or private channel.

    Args:
        channel_id (str): The channel to unarchive.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.unarchive", json=kwargs)
def admin_conversations_rename(
    self, *, channel_id: str, name: str, **kwargs
) -> SlackResponse:
    """Rename a public or private channel.

    Args:
        channel_id (str): The channel to rename.
        name (str): The name to rename the channel to.
    """
    kwargs["channel_id"] = channel_id
    kwargs["name"] = name
    return self.api_call("admin.conversations.rename", json=kwargs)
def admin_conversations_search(self, **kwargs) -> SlackResponse:
    """Search public or private channels across an Enterprise organization."""
    response = self.api_call("admin.conversations.search", json=kwargs)
    return response
def admin_conversations_convertToPrivate(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Convert a public channel to a private channel.

    Args:
        channel_id (str): The channel to convert to private.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.convertToPrivate", json=kwargs)
def admin_conversations_setConversationPrefs(
    self, *, channel_id: str, prefs: Union[str, dict], **kwargs
) -> SlackResponse:
    """Set the posting permissions for a public or private channel.

    Args:
        channel_id (str): The channel to set the prefs for
        prefs (str or dict): The prefs for this channel in a stringified JSON format.
    """
    kwargs["channel_id"] = channel_id
    kwargs["prefs"] = prefs
    return self.api_call("admin.conversations.setConversationPrefs", json=kwargs)
def admin_conversations_getConversationPrefs(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Get conversation preferences for a public or private channel.

    Args:
        channel_id (str): The channel to get the preferences for.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.getConversationPrefs", json=kwargs)
def admin_conversations_disconnectShared(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Disconnect a connected channel from one or more workspaces.

    Args:
        channel_id (str): The channel to be disconnected from some workspaces.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.disconnectShared", json=kwargs)
def admin_conversations_ekm_listOriginalConnectedChannelInfo(
    self, **kwargs
) -> SlackResponse:
    """List all disconnected channels—i.e.,
    channels that were once connected to other workspaces and then disconnected—and
    the corresponding original channel IDs for key revocation with EKM.
    """
    response = self.api_call(
        "admin.conversations.ekm.listOriginalConnectedChannelInfo", params=kwargs
    )
    return response
def admin_conversations_restrictAccess_addGroup(
    self, *, channel_id: str, group_id: str, **kwargs
) -> SlackResponse:
    """Add an allowlist of IDP groups for accessing a channel.

    Args:
        channel_id (str): The channel to link this group to. e.g. 'C1234567890'
        group_id (str): The IDP Group ID to be an allowlist for the private channel. 'S0604QSJC'
        team_id (str): The workspace where the channel exists.
            This argument is required for channels only tied to one workspace,
            and optional for channels that are shared across an organization.
            e.g 'T1234'
    """
    kwargs["channel_id"] = channel_id
    kwargs["group_id"] = group_id
    return self.api_call(
        "admin.conversations.restrictAccess.addGroup",
        http_verb="GET",
        params=kwargs,
    )
def admin_conversations_restrictAccess_listGroups(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """List all IDP Groups linked to a channel.

    Args:
        channel_id (str): The channel to list groups for. e.g. 'C1234567890'
        team_id (str): The workspace where the channel exists.
            This argument is required for channels only tied to one workspace,
            and optional for channels that are shared across an organization.
            e.g 'T1234'
    """
    kwargs["channel_id"] = channel_id
    return self.api_call(
        "admin.conversations.restrictAccess.listGroups",
        http_verb="GET",
        params=kwargs,
    )
def admin_conversations_restrictAccess_removeGroup(
    self, *, channel_id: str, group_id: str, team_id: str, **kwargs
) -> SlackResponse:
    """Remove a linked IDP group linked from a private channel.

    Args:
        channel_id (str): The channel to unlink this group from. e.g. 'C1234567890'
        group_id (str): The IDP Group ID linked to the private channel. 'S0604QSJC'
        team_id (str): The workspace where the channel exists. e.g 'T1234'
    """
    kwargs["channel_id"] = channel_id
    kwargs["group_id"] = group_id
    kwargs["team_id"] = team_id
    return self.api_call(
        "admin.conversations.restrictAccess.removeGroup",
        http_verb="GET",
        params=kwargs,
    )
def admin_conversations_setTeams(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Set the workspaces in an Enterprise grid org that connect to a channel.

    Args:
        channel_id (str): The encoded channel_id to add or remove to workspaces.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.setTeams", json=kwargs)
def admin_conversations_getTeams(
    self, *, channel_id: str, **kwargs
) -> SlackResponse:
    """Get the workspaces in an Enterprise grid org connected to a channel.

    (Doc fix: the previous docstring was copy-pasted from setTeams and
    said "Set"; this method queries, it does not modify.)

    Args:
        channel_id (str): The channel to determine connected workspaces within the organization for.
    """
    kwargs["channel_id"] = channel_id
    return self.api_call("admin.conversations.getTeams", json=kwargs)
def admin_emoji_add(self, **kwargs) -> SlackResponse:
    """Add a custom emoji to the organization."""
    response = self.api_call("admin.emoji.add", http_verb="GET", params=kwargs)
    return response
def admin_emoji_addAlias(self, **kwargs) -> SlackResponse:
    """Add an alias for an existing emoji."""
    response = self.api_call("admin.emoji.addAlias", http_verb="GET", params=kwargs)
    return response
def admin_emoji_list(self, **kwargs) -> SlackResponse:
    """List the emoji of an Enterprise Grid organization."""
    response = self.api_call("admin.emoji.list", http_verb="GET", params=kwargs)
    return response
def admin_emoji_remove(self, **kwargs) -> SlackResponse:
    """Remove an emoji across an Enterprise Grid organization."""
    response = self.api_call("admin.emoji.remove", http_verb="GET", params=kwargs)
    return response
def admin_emoji_rename(self, **kwargs) -> SlackResponse:
    """Rename an existing emoji."""
    response = self.api_call("admin.emoji.rename", http_verb="GET", params=kwargs)
    return response
def admin_users_session_reset(self, *, user_id: str, **kwargs) -> SlackResponse:
    """Wipes all valid sessions on all devices for a given user.

    Args:
        user_id (str): The ID of the user to wipe sessions for. e.g. 'W12345678'
    """
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.session.reset", json=kwargs)
def admin_users_session_invalidate(
    self, *, session_id: str, team_id: str, **kwargs
) -> SlackResponse:
    """Invalidate a single session for a user by session_id.

    Args:
        session_id (str): The ID of a session
        team_id (str): ID of the team that the session belongs to
    """
    kwargs["session_id"] = session_id
    kwargs["team_id"] = team_id
    return self.api_call("admin.users.session.invalidate", params=kwargs)
def admin_inviteRequests_approve(
    self, *, invite_request_id: str, **kwargs
) -> SlackResponse:
    """Approve a workspace invite request.

    team_id is required if your Enterprise Grid org contains more than one workspace.

    Args:
        invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
    """
    kwargs["invite_request_id"] = invite_request_id
    return self.api_call("admin.inviteRequests.approve", json=kwargs)
def admin_inviteRequests_approved_list(self, **kwargs) -> SlackResponse:
    """List every workspace invite request that has been approved."""
    response = self.api_call("admin.inviteRequests.approved.list", json=kwargs)
    return response
def admin_inviteRequests_denied_list(self, **kwargs) -> SlackResponse:
    """List every workspace invite request that has been denied."""
    response = self.api_call("admin.inviteRequests.denied.list", json=kwargs)
    return response
def admin_inviteRequests_deny(
    self, *, invite_request_id: str, **kwargs
) -> SlackResponse:
    """Deny a workspace invite request.

    Args:
        invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
    """
    kwargs["invite_request_id"] = invite_request_id
    return self.api_call("admin.inviteRequests.deny", json=kwargs)
def admin_inviteRequests_list(self, **kwargs) -> SlackResponse:
    """List every workspace invite request still awaiting a decision."""
    response = self.api_call("admin.inviteRequests.list", json=kwargs)
    return response
def admin_teams_admins_list(self, *, team_id: str, **kwargs) -> SlackResponse:
    """List all of the admins on a given workspace.

    Args:
        team_id (str): ID of the team.
    """
    kwargs["team_id"] = team_id
    return self.api_call("admin.teams.admins.list", http_verb="GET", params=kwargs)
def admin_teams_create(
    self, *, team_domain: str, team_name: str, **kwargs
) -> SlackResponse:
    """Create an Enterprise team.

    Args:
        team_domain (str): Team domain. e.g. 'slacksoftballteam'
        team_name (str): Team name. e.g. 'Slack Softball Team'
    """
    kwargs["team_domain"] = team_domain
    kwargs["team_name"] = team_name
    return self.api_call("admin.teams.create", json=kwargs)
def admin_teams_list(self, **kwargs) -> SlackResponse:
    """List every team in an Enterprise organization."""
    response = self.api_call("admin.teams.list", json=kwargs)
    return response
def admin_teams_owners_list(self, *, team_id: str, **kwargs) -> SlackResponse:
    """List all of the owners on a given workspace.

    (Doc fix: previous docstring said "admins", copy-pasted from
    admin_teams_admins_list; this wraps admin.teams.owners.list.)

    Args:
        team_id (str): ID of the team.
    """
    kwargs["team_id"] = team_id
    return self.api_call("admin.teams.owners.list", http_verb="GET", params=kwargs)
def admin_teams_settings_info(self, team_id: str, **kwargs) -> SlackResponse:
    """Fetch information about settings in a workspace

    Args:
        team_id (str): ID of the team.
    """
    # NOTE(review): unlike sibling methods, team_id here is positional,
    # not keyword-only; kept for backward compatibility with callers.
    kwargs["team_id"] = team_id
    return self.api_call("admin.teams.settings.info", json=kwargs)
def admin_teams_settings_setDefaultChannels(
    self, *, team_id: str, channel_ids: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Set the default channels of a workspace.

    Args:
        team_id (str): ID of the team.
        channel_ids (str or list): A list of channel_ids.
            At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
    """
    kwargs["team_id"] = team_id
    kwargs["channel_ids"] = (
        ",".join(channel_ids) if isinstance(channel_ids, list) else channel_ids
    )
    return self.api_call(
        "admin.teams.settings.setDefaultChannels", http_verb="GET", params=kwargs
    )
def admin_teams_settings_setDescription(
    self, *, team_id: str, description: str, **kwargs
) -> SlackResponse:
    """Set the description of a given workspace.

    Args:
        team_id (str): ID of the team.
        description (str): Description of the team.
    """
    kwargs["team_id"] = team_id
    kwargs["description"] = description
    return self.api_call("admin.teams.settings.setDescription", json=kwargs)
def admin_teams_settings_setDiscoverability(
    self, *, team_id: str, discoverability: str, **kwargs
) -> SlackResponse:
    """Sets the discoverability of a workspace.

    (Doc fix: previous summary said "Sets the icon of a workspace",
    copy-pasted from setIcon.)

    Args:
        team_id (str): ID of the team.
        discoverability (str): This workspace's discovery setting.
            It must be set to one of open, invite_only, closed, or unlisted.
    """
    kwargs["team_id"] = team_id
    kwargs["discoverability"] = discoverability
    return self.api_call("admin.teams.settings.setDiscoverability", json=kwargs)
def admin_teams_settings_setIcon(
    self, *, team_id: str, image_url: str, **kwargs
) -> SlackResponse:
    """Sets the icon of a workspace.

    Args:
        team_id (str): ID of the team.
        image_url (str): Url of the icon.
    """
    kwargs["team_id"] = team_id
    kwargs["image_url"] = image_url
    return self.api_call(
        "admin.teams.settings.setIcon", http_verb="GET", params=kwargs
    )
def admin_teams_settings_setName(
    self, *, team_id: str, name: str, **kwargs
) -> SlackResponse:
    """Sets the name of a workspace.

    (Doc fix: previous summary said "Sets the icon of a workspace",
    copy-pasted from setIcon.)

    Args:
        team_id (str): ID of the team.
        name (str): Name of the team.
    """
    kwargs["team_id"] = team_id
    kwargs["name"] = name
    return self.api_call("admin.teams.settings.setName", json=kwargs)
def admin_usergroups_addChannels(
    self,
    *,
    team_id: str,
    usergroup_id: str,
    channel_ids: Union[str, List[str]],
    **kwargs
) -> SlackResponse:
    """Add one or more default channels to an IDP group.

    Args:
        team_id (str): The workspace to add default channels in. e.g. 'T1234'
        usergroup_id (str): ID of the IDP group to add default channels for. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    kwargs["team_id"] = team_id
    kwargs["usergroup_id"] = usergroup_id
    kwargs["channel_ids"] = (
        ",".join(channel_ids) if isinstance(channel_ids, list) else channel_ids
    )
    return self.api_call("admin.usergroups.addChannels", json=kwargs)
def admin_usergroups_addTeams(
    self, *, usergroup_id: str, team_ids: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Associate one or more default workspaces with an organization-wide IDP group.

    Args:
        usergroup_id (str): ID of the IDP group. e.g. 'S1234'
        team_ids (str or list): A comma separated list of encoded team (workspace) IDs.
            Each workspace MUST belong to the organization associated with the token.
            e.g. 'T12345678,T98765432' or ['T12345678', 'T98765432']
    """
    kwargs["usergroup_id"] = usergroup_id
    kwargs["team_ids"] = ",".join(team_ids) if isinstance(team_ids, list) else team_ids
    return self.api_call("admin.usergroups.addTeams", json=kwargs)
def admin_usergroups_listChannels(
    self, *, usergroup_id: str, **kwargs
) -> SlackResponse:
    """List the default channels of an IDP group.

    (Doc fix: previous summary said "Add one or more default channels",
    copy-pasted from addChannels; this wraps admin.usergroups.listChannels.)

    Args:
        usergroup_id (str): ID of the IDP group to list default channels for. e.g. 'S1234'
    """
    kwargs["usergroup_id"] = usergroup_id
    return self.api_call("admin.usergroups.listChannels", json=kwargs)
def admin_usergroups_removeChannels(
    self, *, usergroup_id: str, channel_ids: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Remove one or more default channels from an IDP group.

    (Doc fix: previous summary said "Add one or more default channels",
    copy-pasted from addChannels; this wraps admin.usergroups.removeChannels.)

    Args:
        usergroup_id (str): ID of the IDP group. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    kwargs["usergroup_id"] = usergroup_id
    kwargs["channel_ids"] = (
        ",".join(channel_ids) if isinstance(channel_ids, list) else channel_ids
    )
    return self.api_call("admin.usergroups.removeChannels", json=kwargs)
def admin_users_assign(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Add an Enterprise user to a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): ID of the user to add to the workspace.
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.assign", json=kwargs)
def admin_users_invite(
    self, *, team_id: str, email: str, channel_ids: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Invite a user to a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        email (str): The email address of the person to invite. e.g. 'joe@email.com'
        channel_ids (str or list): A list of channel_ids for this user to join.
            At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
    """
    kwargs["team_id"] = team_id
    kwargs["email"] = email
    kwargs["channel_ids"] = (
        ",".join(channel_ids) if isinstance(channel_ids, list) else channel_ids
    )
    return self.api_call("admin.users.invite", json=kwargs)
def admin_users_list(self, *, team_id: str, **kwargs) -> SlackResponse:
    """List users on a workspace

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
    """
    kwargs["team_id"] = team_id
    return self.api_call("admin.users.list", json=kwargs)
def admin_users_remove(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Remove a user from a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to remove. e.g. 'W12345678'
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.remove", json=kwargs)
def admin_users_setAdmin(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an existing guest, regular user, or owner to be an admin user.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to promote to admin. e.g. 'W12345678'
            (Doc fix: previously described as "the user to remove",
            copy-pasted from admin_users_remove.)
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setAdmin", json=kwargs)
def admin_users_setExpiration(
    self, *, expiration_ts: int, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an expiration for a guest user.

    Args:
        expiration_ts (int): Timestamp when guest account should be disabled. e.g. '1234567890'
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to set an expiration for. e.g. 'W12345678'
    """
    kwargs["expiration_ts"] = expiration_ts
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setExpiration", json=kwargs)
def admin_users_setOwner(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an existing guest, regular user, or admin user to be a workspace owner.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to promote to owner. e.g. 'W12345678'
            (Doc fix: previously described as "the user to remove",
            copy-pasted from admin_users_remove.)
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setOwner", json=kwargs)
def admin_users_setRegular(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an existing guest user, admin user, or owner to be a regular user.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to demote to a regular user. e.g. 'W12345678'
            (Doc fix: previously described as "the user to remove",
            copy-pasted from admin_users_remove.)
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setRegular", json=kwargs)
def api_test(self, **kwargs) -> SlackResponse:
    """Exercise the API calling machinery via api.test."""
    response = self.api_call("api.test", json=kwargs)
    return response
def apps_event_authorizations_list(
    self, event_context: str, **kwargs
) -> SlackResponse:
    """Get a list of authorizations for the given event context.

    Each authorization represents an app installation that the event is visible to.

    Args:
        event_context (str): You'll receive an event_context identifying an event in each event payload sent to your app.
    """
    kwargs["event_context"] = event_context
    return self.api_call("apps.event.authorizations.list", params=kwargs)
def apps_uninstall(
    self, client_id: str, client_secret: str, **kwargs
) -> SlackResponse:
    """Uninstalls your app from a workspace.

    Args:
        client_id (str): Issued when you created your application. e.g. '56579136444.26251006572'
        client_secret (str): Issued when you created your application. e.g. 'f25b5ceaf8a3c2a2c4f52bb4f0b0499e'
    """
    kwargs["client_id"] = client_id
    kwargs["client_secret"] = client_secret
    return self.api_call("apps.uninstall", params=kwargs)
def auth_revoke(self, **kwargs) -> SlackResponse:
    """Revoke the token used for this request."""
    response = self.api_call("auth.revoke", http_verb="GET", params=kwargs)
    return response
def auth_test(self, **kwargs) -> SlackResponse:
    """Check authentication and identity of the calling token."""
    response = self.api_call("auth.test", json=kwargs)
    return response
def bots_info(self, **kwargs) -> SlackResponse:
    """Fetch information about a bot user."""
    response = self.api_call("bots.info", http_verb="GET", params=kwargs)
    return response
def calls_add(
    self, *, external_unique_id: str, join_url: str, **kwargs
) -> SlackResponse:
    """Registers a new Call.

    Args:
        external_unique_id (str): An ID supplied by the 3rd-party Call provider.
            It must be unique across all Calls from that service.
            e.g. '025169F6-E37A-4E62-BB54-7F93A0FC4C1F'
        join_url (str): The URL required for a client to join the Call.
            e.g. 'https://example.com/calls/1234567890'
    """
    kwargs["external_unique_id"] = external_unique_id
    kwargs["join_url"] = join_url
    # Normalize any caller-supplied participant list before sending.
    _update_call_participants(kwargs, kwargs.get("users", None))  # skipcq: PTC-W0039
    return self.api_call("calls.add", http_verb="POST", params=kwargs)
def calls_end(self, *, id: str, **kwargs) -> SlackResponse:  # skipcq: PYL-W0622
    """Ends a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.end", http_verb="POST", params=kwargs)
def calls_info(self, *, id: str, **kwargs) -> SlackResponse:  # skipcq: PYL-W0622
    """Returns information about a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.info", http_verb="POST", params=kwargs)
def calls_participants_add(
    self,
    *,
    id: str,  # skipcq: PYL-W0622
    users: Union[str, List[Dict[str, str]]],
    **kwargs
) -> SlackResponse:
    """Registers new participants added to a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
        users: (list): The list of users to add as participants in the Call.
    """
    kwargs["id"] = id
    _update_call_participants(kwargs, users)
    return self.api_call("calls.participants.add", http_verb="POST", params=kwargs)
def calls_participants_remove(
    self,
    *,
    id: str,  # skipcq: PYL-W0622
    users: Union[str, List[Dict[str, str]]],
    **kwargs
) -> SlackResponse:
    """Registers participants removed from a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
        users: (list): The list of users to remove as participants in the Call.
    """
    kwargs["id"] = id
    _update_call_participants(kwargs, users)
    return self.api_call(
        "calls.participants.remove", http_verb="POST", params=kwargs
    )
def calls_update(self, *, id: str, **kwargs) -> SlackResponse:  # skipcq: PYL-W0622
    """Updates information about a Call.

    Args:
        id (str): id returned by the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.update", http_verb="POST", params=kwargs)
def channels_archive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Archives a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.archive", json=kwargs)
def channels_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Creates a channel.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("channels.create", json=kwargs)
def channels_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetches history of messages and events from a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.history", http_verb="GET", params=kwargs)
def channels_info(self, *, channel: str, **kwargs) -> SlackResponse:
    """Get information about a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.info", http_verb="GET", params=kwargs)
def channels_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Invite a user to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("channels.invite", json=kwargs)
def channels_join(self, *, name: str, **kwargs) -> SlackResponse:
    """Join a channel, creating it if needed.

    Args:
        name (str): The channel name. e.g. '#general'
    """
    kwargs["name"] = name
    return self.api_call("channels.join", json=kwargs)
def channels_kick(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Remove a user from a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("channels.kick", json=kwargs)
def channels_leave(self, *, channel: str, **kwargs) -> SlackResponse:
    """Leave a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.leave", json=kwargs)
def channels_list(self, **kwargs) -> SlackResponse:
    """List all channels in a Slack team."""
    response = self.api_call("channels.list", http_verb="GET", params=kwargs)
    return response
def channels_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("channels.mark", json=kwargs)
def channels_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
    """Rename a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("channels.rename", json=kwargs)
def channels_replies(
    self, *, channel: str, thread_ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a thread of messages posted to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("channels.replies", http_verb="GET", params=kwargs)
def channels_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> SlackResponse:
    """Set the purpose for a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("channels.setPurpose", json=kwargs)
def channels_setTopic(self, *, channel: str, topic: str, **kwargs) -> SlackResponse:
    """Set the topic for a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("channels.setTopic", json=kwargs)
def channels_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Unarchive a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.unarchive", json=kwargs)
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Delete a message.

    Args:
        channel (str): Channel containing the message to be deleted. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("chat.delete", json=kwargs)
def chat_deleteScheduledMessage(
    self, *, channel: str, scheduled_message_id: str, **kwargs
) -> SlackResponse:
    """Delete a scheduled message.

    Args:
        channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
        scheduled_message_id (str): scheduled_message_id returned from call to chat.scheduleMessage e.g. 'Q1234ABCD'
    """
    kwargs["channel"] = channel
    kwargs["scheduled_message_id"] = scheduled_message_id
    return self.api_call("chat.deleteScheduledMessage", json=kwargs)
def chat_getPermalink(
    self, *, channel: str, message_ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a permalink URL for a specific extant message.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        message_ts (str): The timestamp. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["message_ts"] = message_ts
    return self.api_call("chat.getPermalink", http_verb="GET", params=kwargs)
def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:
    """Share a me message into a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        text (str): The message you'd like to share. e.g. 'Hello world'
    """
    kwargs["channel"] = channel
    kwargs["text"] = text
    return self.api_call("chat.meMessage", json=kwargs)
def chat_postEphemeral(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Send an ephemeral message to a user in a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The id of user who should see the message. e.g. 'U0BPQUNTA'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A dictionary list of blocks.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    # Convert any Block/Attachment model objects passed by the caller to dicts.
    _parse_web_class_objects(kwargs)
    return self.api_call("chat.postEphemeral", json=kwargs)
def chat_postMessage(self, *, channel: str, **kwargs) -> SlackResponse:
    """Send a message to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A dictionary list of blocks.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    # Convert any Block/Attachment model objects passed by the caller to dicts.
    _parse_web_class_objects(kwargs)
    return self.api_call("chat.postMessage", json=kwargs)
def chat_scheduleMessage(
    self, *, channel: str, post_at: str, text: str, **kwargs
) -> SlackResponse:
    """Schedule a message for later delivery.

    Args:
        channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
        post_at (str): Unix EPOCH timestamp of time in future to send the message. e.g. '299876400'
        text (str): The message you'd like to send. e.g. 'Hello world'
    """
    kwargs["channel"] = channel
    kwargs["post_at"] = post_at
    kwargs["text"] = text
    # Convert any Block/Attachment model objects passed by the caller to dicts.
    _parse_web_class_objects(kwargs)
    return self.api_call("chat.scheduleMessage", json=kwargs)
def chat_unfurl(
    self, *, channel: str, ts: str, unfurls: dict, **kwargs
) -> SlackResponse:
    """Provide custom unfurl behavior for user-posted URLs.

    Args:
        channel (str): The Channel ID of the message. e.g. 'C1234567890'
        ts (str): Timestamp of the message to add unfurl behavior to. e.g. '1234567890.123456'
        unfurls (dict): a dict of the specific URLs you're offering an unfurl for.
            e.g. {"https://example.com/": {"text": "Every day is the test."}}
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    kwargs["unfurls"] = unfurls
    return self.api_call("chat.unfurl", json=kwargs)
def chat_update(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Update an existing message in a channel.

    Args:
        channel (str): The channel containing the message to be updated. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be updated. e.g. '1234567890.123456'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A dictionary list of blocks.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    # Convert any Block/Attachment model objects passed by the caller to dicts.
    _parse_web_class_objects(kwargs)
    return self.api_call("chat.update", json=kwargs)
def chat_scheduledMessages_list(self, **kwargs) -> SlackResponse:
    """List all scheduled messages."""
    response = self.api_call("chat.scheduledMessages.list", json=kwargs)
    return response
def conversations_archive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Archive a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.archive", json=kwargs)
def conversations_close(self, *, channel: str, **kwargs) -> SlackResponse:
    """Close a direct message or multi-person direct message.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.close", json=kwargs)
def conversations_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Initiate a public or private channel-based conversation.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("conversations.create", json=kwargs)
def conversations_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch a conversation's history of messages and events.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.history", http_verb="GET", params=kwargs)
def conversations_info(self, *, channel: str, **kwargs) -> SlackResponse:
    """Retrieve information about a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.info", http_verb="GET", params=kwargs)
def conversations_invite(
    self, *, channel: str, users: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Invite users to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        users (str or list): An list of user id's to invite. e.g. ['U2345678901', 'U3456789012']
    """
    kwargs["channel"] = channel
    # The API expects a comma-separated string of user ids.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("conversations.invite", json=kwargs)
def conversations_join(self, *, channel: str, **kwargs) -> SlackResponse:
    """Join an existing conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.join", json=kwargs)
def conversations_kick(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Remove a user from a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The id of the user to kick. e.g. 'U2345678901'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("conversations.kick", json=kwargs)
def conversations_leave(self, *, channel: str, **kwargs) -> SlackResponse:
    """Leave a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.leave", json=kwargs)
def conversations_list(self, **kwargs) -> SlackResponse:
    """List all channels in a Slack team."""
    response = self.api_call("conversations.list", http_verb="GET", params=kwargs)
    return response
def conversations_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a channel.

    Args:
        channel (str): Channel or conversation to set the read cursor for e.g. 'C1234567890'
        ts (str): Unique identifier of message to mark as most recently seen in the convo e.g. '1593473566.000200'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("conversations.mark", json=kwargs)
def conversations_members(self, *, channel: str, **kwargs) -> SlackResponse:
    """Retrieve members of a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.members", http_verb="GET", params=kwargs)
def conversations_open(self, **kwargs) -> SlackResponse:
    """Open or resume a direct message or multi-person direct message."""
    response = self.api_call("conversations.open", json=kwargs)
    return response
def conversations_rename(
    self, *, channel: str, name: str, **kwargs
) -> SlackResponse:
    """Rename a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("conversations.rename", json=kwargs)
def conversations_replies(
    self, *, channel: str, ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a thread of messages posted to a conversation.

    Args:
        channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'
        ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("conversations.replies", http_verb="GET", params=kwargs)
def conversations_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> SlackResponse:
    """Set the purpose for a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("conversations.setPurpose", json=kwargs)
def conversations_setTopic(
    self, *, channel: str, topic: str, **kwargs
) -> SlackResponse:
    """Set the topic for a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("conversations.setTopic", json=kwargs)
def conversations_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Reverse conversation archival.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.unarchive", json=kwargs)
def dialog_open(self, *, dialog: dict, trigger_id: str, **kwargs) -> SlackResponse:
    """Open a dialog with a user.

    Args:
        dialog (dict): A dictionary of dialog arguments, e.g.
            {
                "callback_id": "46eh782b0",
                "title": "Request something",
                "submit_label": "Request",
                "state": "Max",
                "elements": [
                    {"type": "text", "label": "Origin", "name": "loc_origin"},
                    {"type": "text", "label": "Destination", "name": "loc_destination"},
                ]
            }
        trigger_id (str): The trigger id of a recent message interaction.
            e.g. '12345.98765.abcd2358fdea'
    """
    kwargs["dialog"] = dialog
    kwargs["trigger_id"] = trigger_id
    return self.api_call("dialog.open", json=kwargs)
def dnd_endDnd(self, **kwargs) -> SlackResponse:
    """End the current user's Do Not Disturb session immediately."""
    response = self.api_call("dnd.endDnd", json=kwargs)
    return response
def dnd_endSnooze(self, **kwargs) -> SlackResponse:
    """End the current user's snooze mode immediately."""
    response = self.api_call("dnd.endSnooze", json=kwargs)
    return response
def dnd_info(self, **kwargs) -> SlackResponse:
    """Retrieve a user's current Do Not Disturb status."""
    response = self.api_call("dnd.info", http_verb="GET", params=kwargs)
    return response
def dnd_setSnooze(self, *, num_minutes: int, **kwargs) -> SlackResponse:
    """Turns on Do Not Disturb mode for the current user, or changes its duration.

    Args:
        num_minutes (int): The snooze duration. e.g. 60
    """
    kwargs.update({"num_minutes": num_minutes})
    # dnd.setSnooze is a write operation; Slack's API documentation specifies
    # POST for it (it was previously issued here as GET).
    return self.api_call("dnd.setSnooze", http_verb="POST", params=kwargs)
def dnd_teamInfo(self, users: Union[str, List[str]], **kwargs) -> SlackResponse:
    """Retrieve the Do Not Disturb status for users on a team.

    Args:
        users (str or list): User IDs to fetch information e.g. 'U123,U234' or ["U123", "U234"]
    """
    # The API expects a comma-separated string of user ids.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("dnd.teamInfo", http_verb="GET", params=kwargs)
def emoji_list(self, **kwargs) -> SlackResponse:
    """List custom emoji for a team."""
    response = self.api_call("emoji.list", http_verb="GET", params=kwargs)
    return response
def files_comments_delete(
    self, *, file: str, id: str, **kwargs  # skipcq: PYL-W0622
) -> SlackResponse:
    """Delete an existing comment on a file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
        id (str): The file comment id. e.g. 'Fc1234567890'
    """
    kwargs["file"] = file
    kwargs["id"] = id
    return self.api_call("files.comments.delete", json=kwargs)
def files_delete(self, *, file: str, **kwargs) -> SlackResponse:
    """Delete a file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.delete", json=kwargs)
def files_info(self, *, file: str, **kwargs) -> SlackResponse:
    """Get information about a team file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.info", http_verb="GET", params=kwargs)
def files_list(self, **kwargs) -> SlackResponse:
    """List and filter team files."""
    response = self.api_call("files.list", http_verb="GET", params=kwargs)
    return response
def files_remote_info(self, **kwargs) -> SlackResponse:
    """Retrieve information about a remote file added to Slack."""
    response = self.api_call("files.remote.info", http_verb="GET", params=kwargs)
    return response
def files_remote_list(self, **kwargs) -> SlackResponse:
    """List remote files added to Slack."""
    response = self.api_call("files.remote.list", http_verb="GET", params=kwargs)
    return response
def files_remote_add(
    self, *, external_id: str, external_url: str, title: str, **kwargs
) -> SlackResponse:
    """Add a file from a remote service.

    Args:
        external_id (str): Creator defined GUID for the file. e.g. '123456'
        external_url (str): URL of the remote file. e.g. 'http://example.com/my_cloud_service_file/abc123'
        title (str): Title of the file being shared. e.g. 'Danger, High Voltage!'
    """
    kwargs["external_id"] = external_id
    kwargs["external_url"] = external_url
    kwargs["title"] = title
    # preview_image (file): Preview of the document via multipart/form-data.
    files = None
    if "preview_image" in kwargs:
        files = {"preview_image": kwargs.pop("preview_image")}
    # Intentionally using "POST" method over "GET" here
    return self.api_call(
        "files.remote.add",
        http_verb="POST",
        data=kwargs,
        files=files,
    )
def files_remote_update(self, **kwargs) -> SlackResponse:
    """Updates an existing remote file."""
    # files.remote.update mutates state; Slack's API documentation specifies
    # POST for it (it was previously issued here as GET).
    return self.api_call("files.remote.update", http_verb="POST", params=kwargs)
def files_remote_remove(self, **kwargs) -> SlackResponse:
    """Remove a remote file."""
    # files.remote.remove mutates state; Slack's API documentation specifies
    # POST for it (it was previously issued here as GET).
    return self.api_call("files.remote.remove", http_verb="POST", params=kwargs)
def files_remote_share(
    self, *, channels: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """Share a remote file into a channel.

    Args:
        channels (str or list): Comma-separated list of channel IDs where the file will be shared.
            e.g. ['C1234567890', 'C2345678901']
    """
    # The API expects a comma-separated string of channel ids.
    kwargs["channels"] = ",".join(channels) if isinstance(channels, list) else channels
    # NOTE(review): Slack docs prefer POST for write methods — verify before changing.
    return self.api_call("files.remote.share", http_verb="GET", params=kwargs)
def files_revokePublicURL(self, *, file: str, **kwargs) -> SlackResponse:
    """Revoke public/external sharing access for a file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.revokePublicURL", json=kwargs)
def files_sharedPublicURL(self, *, file: str, **kwargs) -> SlackResponse:
    """Enable a file for public/external sharing.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.sharedPublicURL", json=kwargs)
def files_upload(
    self, *, file: Union[str, IOBase] = None, content: str = None, **kwargs
) -> SlackResponse:
    """Uploads or creates a file.

    Args:
        file (str): Supply a file path.
            when you'd like to upload a specific file. e.g. 'dramacat.gif'
        content (str): Supply content when you'd like to create an
            editable text file containing the specified text. e.g. 'launch plan'
    Raises:
        SlackRequestError: If neither or both the `file` and `content` args are specified.
    """
    if file is None and content is None:
        raise e.SlackRequestError("The file or content argument must be specified.")
    if file is not None and content is not None:
        raise e.SlackRequestError(
            "You cannot specify both the file and the content argument."
        )
    if file:
        if "filename" not in kwargs and isinstance(file, str):
            # use the local filename if filename is missing;
            # os.path.basename handles both '/' and the platform separator,
            # unlike splitting on os.path.sep alone (which misses '/' on Windows).
            kwargs["filename"] = os.path.basename(file)
        return self.api_call("files.upload", files={"file": file}, data=kwargs)
    data = kwargs.copy()
    data.update({"content": content})
    return self.api_call("files.upload", data=data)
def groups_archive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Archive a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.archive", json=kwargs)
def groups_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Create a private channel.

    Args:
        name (str): The name of the private group. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("groups.create", json=kwargs)
def groups_createChild(self, *, channel: str, **kwargs) -> SlackResponse:
    """Clone and archive a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.createChild", http_verb="GET", params=kwargs)
def groups_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch history of messages and events from a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.history", http_verb="GET", params=kwargs)
def groups_info(self, *, channel: str, **kwargs) -> SlackResponse:
    """Get information about a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.info", http_verb="GET", params=kwargs)
def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Invite a user to a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("groups.invite", json=kwargs)
def groups_kick(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Remove a user from a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("groups.kick", json=kwargs)
def groups_leave(self, *, channel: str, **kwargs) -> SlackResponse:
    """Leave a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.leave", json=kwargs)
def groups_list(self, **kwargs) -> SlackResponse:
    """List private channels that the calling user has access to."""
    response = self.api_call("groups.list", http_verb="GET", params=kwargs)
    return response
def groups_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a private channel.

    Args:
        channel (str): Private channel to set reading cursor in. e.g. 'C1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("groups.mark", json=kwargs)
def groups_open(self, *, channel: str, **kwargs) -> SlackResponse:
    """Open a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.open", json=kwargs)
def groups_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
    """Rename a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("groups.rename", json=kwargs)
def groups_replies(
    self, *, channel: str, thread_ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a thread of messages posted to a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("groups.replies", http_verb="GET", params=kwargs)
def groups_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> SlackResponse:
    """Set the purpose for a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("groups.setPurpose", json=kwargs)
def groups_setTopic(self, *, channel: str, topic: str, **kwargs) -> SlackResponse:
    """Set the topic for a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("groups.setTopic", json=kwargs)
def groups_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Unarchive a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.unarchive", json=kwargs)
def im_close(self, *, channel: str, **kwargs) -> SlackResponse:
    """Close a direct message channel.

    Args:
        channel (str): Direct message channel to close. e.g. 'D1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("im.close", json=kwargs)
def im_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch history of messages and events from a direct message channel.

    Args:
        channel (str): Direct message channel to fetch history from. e.g. 'D1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("im.history", http_verb="GET", params=kwargs)
def im_list(self, **kwargs) -> SlackResponse:
    """List direct message channels for the calling user."""
    response = self.api_call("im.list", http_verb="GET", params=kwargs)
    return response
def im_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a direct message channel.

    Args:
        channel (str): Direct message channel to set reading cursor in. e.g. 'D1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("im.mark", json=kwargs)
def im_open(self, *, user: str, **kwargs) -> SlackResponse:
    """Open a direct message channel.

    Args:
        user (str): The user id to open a DM with. e.g. 'W1234567890'
    """
    kwargs["user"] = user
    return self.api_call("im.open", json=kwargs)
def im_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a direct message conversation.

    Args:
        channel (str): Direct message channel to fetch thread from. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("im.replies", http_verb="GET", params=kwargs)
def migration_exchange(
    self, *, users: Union[str, List[str]], **kwargs
) -> SlackResponse:
    """For Enterprise Grid workspaces, map local user IDs to global user IDs.

    Args:
        users (str or list): A list of user ids, up to 400 per request.
            e.g. ['W1234567890', 'U2345678901', 'U3456789012']
    """
    # The API expects a comma-separated string of user ids.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("migration.exchange", http_verb="GET", params=kwargs)
def mpim_close(self, *, channel: str, **kwargs) -> SlackResponse:
    """Close a multiparty direct message channel.

    Args:
        channel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("mpim.close", json=kwargs)
def mpim_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch history of messages and events from a multiparty direct message.

    Args:
        channel (str): Multiparty direct message to fetch history for. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("mpim.history", http_verb="GET", params=kwargs)
def mpim_list(self, **kwargs) -> SlackResponse:
    """List multiparty direct message channels for the calling user."""
    response = self.api_call("mpim.list", http_verb="GET", params=kwargs)
    return response
def mpim_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a multiparty direct message channel.

    Args:
        channel (str): Multiparty direct message channel to set reading cursor in.
            e.g. 'G1234567890'
        ts (str): Timestamp of the most recently seen message.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("mpim.mark", json=kwargs)
def mpim_open(self, *, users: Union[str, List[str]], **kwargs) -> SlackResponse:
    """Open a multiparty direct message.

    Args:
        users (str or list): A lists of user ids. The ordering of the users
            is preserved whenever a MPIM group is returned.
            e.g. ['W1234567890', 'U2345678901', 'U3456789012']
    """
    # The API expects a comma-separated string of user ids.
    kwargs["users"] = ",".join(users) if isinstance(users, list) else users
    return self.api_call("mpim.open", json=kwargs)
def mpim_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a multiparty direct message.

    Args:
        channel (str): Multiparty direct message channel to fetch thread from.
            e.g. 'G1234567890'
        thread_ts (str): Unique identifier of a thread's parent message.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("mpim.replies", http_verb="GET", params=kwargs)
def oauth_v2_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> SlackResponse:
    """Exchange a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
        client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
        code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
        redirect_uri (optional str): Must match the originally submitted URI
            (if one was sent). e.g. 'https://example.com'
    """
    kwargs["code"] = code
    if redirect_uri is not None:
        kwargs["redirect_uri"] = redirect_uri
    # client_id/client_secret are sent via HTTP Basic auth, not the body.
    credentials = {"client_id": client_id, "client_secret": client_secret}
    return self.api_call("oauth.v2.access", data=kwargs, auth=credentials)
def oauth_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> SlackResponse:
    """Exchange a temporary OAuth verifier code for an access token (classic flow).

    Args:
        client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
        client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
        code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
        redirect_uri (optional str): Must match the originally submitted URI
            (if one was sent). e.g. 'https://example.com'
    """
    kwargs["code"] = code
    if redirect_uri is not None:
        kwargs["redirect_uri"] = redirect_uri
    # client_id/client_secret are sent via HTTP Basic auth, not the body.
    credentials = {"client_id": client_id, "client_secret": client_secret}
    return self.api_call("oauth.access", data=kwargs, auth=credentials)
def pins_add(self, *, channel: str, **kwargs) -> SlackResponse:
    """Pin an item to a channel.

    Args:
        channel (str): Channel to pin the item in. e.g. 'C1234567890'
        file (str): File id to pin. e.g. 'F1234567890'
        file_comment (str): File comment to pin. e.g. 'Fc1234567890'
        timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    return self.api_call("pins.add", json=kwargs)
def pins_list(self, *, channel: str, **kwargs) -> SlackResponse:
"""Lists items pinned to a channel.
Args:
channel (str): Channel to get pinned items for. e.g. 'C1234567890'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.list", http_verb="GET", params=kwargs)
def pins_remove(self, *, channel: str, **kwargs) -> SlackResponse:
"""Un-pins an item from a channel.
Args:
channel (str): Channel to pin the item in. e.g. 'C1234567890'
file (str): File id to pin. e.g. 'F1234567890'
file_comment (str): File comment to pin. e.g. 'Fc1234567890'
timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
"""
kwargs.update({"channel": channel})
return self.api_call("pins.remove", json=kwargs)
def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:
"""Adds a reaction to an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
channel (str): Channel where the message to add reaction to was posted.
e.g. 'C1234567890'
timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
"""
kwargs.update({"name": name})
return self.api_call("reactions.add", json=kwargs)
    def reactions_get(self, **kwargs) -> SlackResponse:
        """Gets reactions for an item.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `reactions.get` Web API method.
        """
        return self.api_call("reactions.get", http_verb="GET", params=kwargs)
    def reactions_list(self, **kwargs) -> SlackResponse:
        """Lists reactions made by a user.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `reactions.list` Web API method.
        """
        return self.api_call("reactions.list", http_verb="GET", params=kwargs)
def reactions_remove(self, *, name: str, **kwargs) -> SlackResponse:
"""Removes a reaction from an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
"""
kwargs.update({"name": name})
return self.api_call("reactions.remove", json=kwargs)
def reminders_add(self, *, text: str, time: str, **kwargs) -> SlackResponse:
"""Creates a reminder.
Args:
text (str): The content of the reminder. e.g. 'eat a banana'
time (str): When this reminder should happen:
the Unix timestamp (up to five years from now e.g. '1602288000'),
the number of seconds until the reminder (if within 24 hours),
or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
"""
kwargs.update({"text": text, "time": time})
return self.api_call("reminders.add", json=kwargs)
def reminders_complete(self, *, reminder: str, **kwargs) -> SlackResponse:
"""Marks a reminder as complete.
Args:
reminder (str): The ID of the reminder to be marked as complete.
e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.complete", json=kwargs)
def reminders_delete(self, *, reminder: str, **kwargs) -> SlackResponse:
"""Deletes a reminder.
Args:
reminder (str): The ID of the reminder. e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.delete", json=kwargs)
def reminders_info(self, *, reminder: str, **kwargs) -> SlackResponse:
"""Gets information about a reminder.
Args:
reminder (str): The ID of the reminder. e.g. 'Rm12345678'
"""
kwargs.update({"reminder": reminder})
return self.api_call("reminders.info", http_verb="GET", params=kwargs)
    def reminders_list(self, **kwargs) -> SlackResponse:
        """Lists all reminders created by or for a given user.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `reminders.list` Web API method.
        """
        return self.api_call("reminders.list", http_verb="GET", params=kwargs)
    def rtm_connect(self, **kwargs) -> SlackResponse:
        """Starts a Real Time Messaging session.

        Returns connection metadata (including a WebSocket URL) — the payload
        is lighter than `rtm.start`'s.
        """
        return self.api_call("rtm.connect", http_verb="GET", params=kwargs)
    def rtm_start(self, **kwargs) -> SlackResponse:
        """Starts a Real Time Messaging session.

        NOTE(review): `rtm.connect` is generally preferred for new code —
        confirm against the current Slack API guidance.
        """
        return self.api_call("rtm.start", http_verb="GET", params=kwargs)
def search_all(self, *, query: str, **kwargs) -> SlackResponse:
"""Searches for messages and files matching a query.
Args:
query (str): Search query. May contains booleans, etc.
e.g. 'pickleface'
"""
kwargs.update({"query": query})
return self.api_call("search.all", http_verb="GET", params=kwargs)
def search_files(self, *, query: str, **kwargs) -> SlackResponse:
"""Searches for files matching a query.
Args:
query (str): Search query. May contains booleans, etc.
e.g. 'pickleface'
"""
kwargs.update({"query": query})
return self.api_call("search.files", http_verb="GET", params=kwargs)
def search_messages(self, *, query: str, **kwargs) -> SlackResponse:
"""Searches for messages matching a query.
Args:
query (str): Search query. May contains booleans, etc.
e.g. 'pickleface'
"""
kwargs.update({"query": query})
return self.api_call("search.messages", http_verb="GET", params=kwargs)
    def stars_add(self, **kwargs) -> SlackResponse:
        """Adds a star to an item.

        The item is identified by `file`, `file_comment`, or `channel` (plus
        `timestamp` for a message), passed as keyword arguments.

        Args:
            channel (str): Channel to add star to, or channel where the message to add
                star to was posted (used with timestamp). e.g. 'C1234567890'
            file (str): File to add star to. e.g. 'F1234567890'
            file_comment (str): File comment to add star to. e.g. 'Fc1234567890'
            timestamp (str): Timestamp of the message to add star to. e.g. '1234567890.123456'
        """
        return self.api_call("stars.add", json=kwargs)
    def stars_list(self, **kwargs) -> SlackResponse:
        """Lists stars for a user.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `stars.list` Web API method.
        """
        return self.api_call("stars.list", http_verb="GET", params=kwargs)
    def stars_remove(self, **kwargs) -> SlackResponse:
        """Removes a star from an item.

        The item is identified by `file`, `file_comment`, or `channel` (plus
        `timestamp` for a message), passed as keyword arguments.

        Args:
            channel (str): Channel to remove star from, or channel where
                the message to remove star from was posted (used with timestamp). e.g. 'C1234567890'
            file (str): File to remove star from. e.g. 'F1234567890'
            file_comment (str): File comment to remove star from. e.g. 'Fc1234567890'
            timestamp (str): Timestamp of the message to remove star from. e.g. '1234567890.123456'
        """
        return self.api_call("stars.remove", json=kwargs)
    def team_accessLogs(self, **kwargs) -> SlackResponse:
        """Gets the access logs for the current team.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `team.accessLogs` Web API method.
        """
        return self.api_call("team.accessLogs", http_verb="GET", params=kwargs)
    def team_billableInfo(self, **kwargs) -> SlackResponse:
        """Gets billable users information for the current team.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `team.billableInfo` Web API method.
        """
        return self.api_call("team.billableInfo", http_verb="GET", params=kwargs)
    def team_info(self, **kwargs) -> SlackResponse:
        """Gets information about the current team.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `team.info` Web API method.
        """
        return self.api_call("team.info", http_verb="GET", params=kwargs)
    def team_integrationLogs(self, **kwargs) -> SlackResponse:
        """Gets the integration logs for the current team.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `team.integrationLogs` Web API method.
        """
        return self.api_call("team.integrationLogs", http_verb="GET", params=kwargs)
    def team_profile_get(self, **kwargs) -> SlackResponse:
        """Retrieve a team's profile.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `team.profile.get` Web API method.
        """
        return self.api_call("team.profile.get", http_verb="GET", params=kwargs)
def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:
"""Create a User Group
Args:
name (str): A name for the User Group. Must be unique among User Groups.
e.g. 'My Test Team'
"""
kwargs.update({"name": name})
return self.api_call("usergroups.create", json=kwargs)
def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:
"""Disable an existing User Group
Args:
usergroup (str): The encoded ID of the User Group to disable.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.disable", json=kwargs)
def usergroups_enable(self, *, usergroup: str, **kwargs) -> SlackResponse:
"""Enable a User Group
Args:
usergroup (str): The encoded ID of the User Group to enable.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.enable", json=kwargs)
    def usergroups_list(self, **kwargs) -> SlackResponse:
        """List all User Groups for a team.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `usergroups.list` Web API method.
        """
        return self.api_call("usergroups.list", http_verb="GET", params=kwargs)
def usergroups_update(self, *, usergroup: str, **kwargs) -> SlackResponse:
"""Update an existing User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.update", json=kwargs)
def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:
"""List all users in a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
"""
kwargs.update({"usergroup": usergroup})
return self.api_call("usergroups.users.list", http_verb="GET", params=kwargs)
def usergroups_users_update(
self, *, usergroup: str, users: Union[str, List[str]], **kwargs
) -> SlackResponse:
"""Update the list of users for a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
users (str or list): A list user IDs that represent the entire list of
users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
"""
kwargs.update({"usergroup": usergroup})
if isinstance(users, list):
kwargs.update({"users": ",".join(users)})
else:
kwargs.update({"users": users})
return self.api_call("usergroups.users.update", json=kwargs)
    def users_conversations(self, **kwargs) -> SlackResponse:
        """List conversations the calling user may access.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `users.conversations` Web API method.
        """
        return self.api_call("users.conversations", http_verb="GET", params=kwargs)
    def users_deletePhoto(self, **kwargs) -> SlackResponse:
        """Delete the user profile photo.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `users.deletePhoto` Web API method.
        """
        return self.api_call("users.deletePhoto", http_verb="GET", params=kwargs)
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
"""Gets user presence information.
Args:
user (str): User to get presence info on. Defaults to the authed user.
e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("users.getPresence", http_verb="GET", params=kwargs)
    def users_identity(self, **kwargs) -> SlackResponse:
        """Get a user's identity.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `users.identity` Web API method.
        """
        return self.api_call("users.identity", http_verb="GET", params=kwargs)
def users_info(self, *, user: str, **kwargs) -> SlackResponse:
"""Gets information about a user.
Args:
user (str): User to get info on.
e.g. 'W1234567890'
"""
kwargs.update({"user": user})
return self.api_call("users.info", http_verb="GET", params=kwargs)
    def users_list(self, **kwargs) -> SlackResponse:
        """Lists all users in a Slack team.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `users.list` Web API method.
        """
        return self.api_call("users.list", http_verb="GET", params=kwargs)
def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:
"""Find a user with an email address.
Args:
email (str): An email address belonging to a user in the workspace.
e.g. 'spengler@ghostbusters.example.com'
"""
kwargs.update({"email": email})
return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs)
    def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:
        """Set the user profile photo.

        Args:
            image (str or IOBase): Path of the image to upload, or an already
                opened file-like object. e.g. 'myimage.png'
        """
        # Sent as a multipart upload via `files=`; remaining kwargs go in the form body.
        return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:
"""Manually sets user presence.
Args:
presence (str): Either 'auto' or 'away'.
"""
kwargs.update({"presence": presence})
return self.api_call("users.setPresence", json=kwargs)
    def users_profile_get(self, **kwargs) -> SlackResponse:
        """Retrieves a user's profile information.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `users.profile.get` Web API method.
        """
        return self.api_call("users.profile.get", http_verb="GET", params=kwargs)
    def users_profile_set(self, **kwargs) -> SlackResponse:
        """Set the profile information for a user.

        All keyword arguments are sent as the JSON body of a POST request to
        the `users.profile.set` Web API method.
        """
        return self.api_call("users.profile.set", json=kwargs)
def views_open(
self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> SlackResponse:
"""Open a view for a user.
See https://api.slack.com/block-kit/surfaces/modals for details.
Args:
trigger_id (str): Exchange a trigger to post to the user.
e.g. '12345.98765.abcd2358fdea'
view (dict or View): The view payload.
"""
kwargs.update({"trigger_id": trigger_id})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.open", json=kwargs)
def views_push(
self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> SlackResponse:
"""Push a view onto the stack of a root view.
Push a new view onto the existing view stack by passing a view
payload and a valid trigger_id generated from an interaction
within the existing modal.
Read the modals documentation (https://api.slack.com/block-kit/surfaces/modals)
to learn more about the lifecycle and intricacies of views.
Args:
trigger_id (str): Exchange a trigger to post to the user.
e.g. '12345.98765.abcd2358fdea'
view (dict or View): The view payload.
"""
kwargs.update({"trigger_id": trigger_id, "view": view})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.push", json=kwargs)
def views_update(
self,
*,
view: Union[dict, View],
external_id: str = None,
view_id: str = None,
**kwargs
) -> SlackResponse:
"""Update an existing view.
Update a view by passing a new view definition along with the
view_id returned in views.open or the external_id.
See the modals documentation (https://api.slack.com/block-kit/surfaces/modals#updating_views)
to learn more about updating views and avoiding race conditions with the hash argument.
Args:
view (dict or View): The view payload.
external_id (str): A unique identifier of the view set by the developer.
e.g. 'bmarley_view2'
view_id (str): A unique identifier of the view to be updated.
e.g. 'VMM512F2U'
Raises:
SlackRequestError: Either view_id or external_id is required.
"""
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
if external_id:
kwargs.update({"external_id": external_id})
elif view_id:
kwargs.update({"view_id": view_id})
else:
raise e.SlackRequestError("Either view_id or external_id is required.")
return self.api_call("views.update", json=kwargs)
def views_publish(
self, *, user_id: str, view: Union[dict, View], **kwargs
) -> SlackResponse:
"""Publish a static view for a User.
Create or update the view that comprises an
app's Home tab (https://api.slack.com/surfaces/tabs)
for a specific user.
Args:
user_id (str): id of the user you want publish a view to.
e.g. 'U0BPQUNTA'
view (dict or View): The view payload.
"""
kwargs.update({"user_id": user_id})
if isinstance(view, View):
kwargs.update({"view": view.to_dict()})
else:
kwargs.update({"view": view})
return self.api_call("views.publish", json=kwargs)
def workflows_stepCompleted(
self, *, workflow_step_execute_id: str, outputs: dict = None, **kwargs
) -> SlackResponse:
"""Indicate a successful outcome of a workflow step's execution.
Args:
workflow_step_execute_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
outputs (dict): A key-value object of outputs from your step.
e.g. { 'task_name': 'Task Name' }
"""
kwargs.update({"workflow_step_execute_id": workflow_step_execute_id})
if outputs:
kwargs.update({"outputs": outputs})
return self.api_call("workflows.stepCompleted", json=kwargs)
def workflows_stepFailed(
self, *, workflow_step_execute_id: str, error: dict, **kwargs
) -> SlackResponse:
"""Indicate an unsuccessful outcome of a workflow step's execution.
Args:
workflow_step_execute_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
error (dict): A dict with a message property that contains a human readable error message
e.g. { message: 'Step failed to execute.' }
"""
kwargs.update(
{"workflow_step_execute_id": workflow_step_execute_id, "error": error}
)
return self.api_call("workflows.stepFailed", json=kwargs)
def workflows_updateStep(
self,
*,
workflow_step_edit_id: str,
inputs: dict = None,
outputs: list = None,
**kwargs
) -> SlackResponse:
"""Update the configuration for a workflow extension step.
Args:
workflow_step_edit_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
inputs (dict): A key-value object of inputs required from a user during step configuration.
e.g. { 'title': { 'value': 'The Title' }, 'submitter': { 'value': 'The Submitter' } }
outputs (list): A list of output objects used during step execution.
e.g. [{ 'type': 'text', 'name': 'title', 'label': 'Title' }]
"""
kwargs.update({"workflow_step_edit_id": workflow_step_edit_id})
if inputs:
kwargs.update({"inputs": inputs})
if outputs:
kwargs.update({"outputs": outputs})
return self.api_call("workflows.updateStep", json=kwargs)
| {
"repo_name": "slackapi/python-slackclient",
"path": "slack_sdk/web/client.py",
"copies": "1",
"size": "92296",
"license": "mit",
"hash": -8624254514327431000,
"line_mean": 39.5322793149,
"line_max": 125,
"alpha_frac": 0.5924023751,
"autogenerated": false,
"ratio": 3.911506675143039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003909050243038,
"avg_score": null,
"num_lines": null
} |
"""A Python module for interacting with Slack's Web API."""
import os
from io import IOBase
from typing import Union, Sequence, Optional, Dict, Tuple
import slack_sdk.errors as e
from slack_sdk.models.views import View
from .base_client import BaseClient, SlackResponse
from .internal_utils import (
_parse_web_class_objects,
_update_call_participants,
_warn_if_text_is_missing,
)
class WebClient(BaseClient):
"""A WebClient allows apps to communicate with the Slack Platform's Web API.
https://api.slack.com/methods
The Slack Web API is an interface for querying information from
and enacting change in a Slack workspace.
This client handles constructing and sending HTTP requests to Slack
as well as parsing any responses received into a `SlackResponse`.
Attributes:
token (str): A string specifying an xoxp or xoxb token.
base_url (str): A string representing the Slack API base URL.
Default is 'https://www.slack.com/api/'
timeout (int): The maximum number of seconds the client will wait
to connect and receive a response from Slack.
Default is 30 seconds.
Methods:
api_call: Constructs a request and executes the API call to Slack.
Example of recommended usage:
```python
import os
from slack_sdk import WebClient
client = WebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.chat_postMessage(
channel='#random',
text="Hello world!")
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Example manually creating an API request:
```python
import os
from slack_sdk import WebClient
client = WebClient(token=os.environ['SLACK_API_TOKEN'])
response = client.api_call(
api_method='chat.postMessage',
json={'channel': '#random','text': "Hello world!"}
)
assert response["ok"]
assert response["message"]["text"] == "Hello world!"
```
Note:
Any attributes or methods prefixed with _underscores are
intended to be "private" internal use only. They may be changed or
removed at anytime.
"""
def admin_analytics_getFile(
self,
*,
type: str,
date: Optional[str] = None,
metadata_only: Optional[bool] = None,
**kwargs
) -> SlackResponse:
"""Retrieve analytics data for a given date, presented as a compressed JSON file
Args:
date (str): Date to retrieve the analytics data for,
expressed as YYYY-MM-DD in UTC.
type (str): The type of analytics to retrieve.
The options are currently limited to member.
"""
kwargs.update({"type": type})
if date is not None:
kwargs.update({"date": date})
if metadata_only is not None:
kwargs.update({"metadata_only": metadata_only})
return self.api_call("admin.analytics.getFile", params=kwargs)
def admin_apps_approve(
self, *, app_id: str = None, request_id: str = None, **kwargs
) -> SlackResponse:
"""Approve an app for installation on a workspace.
Either app_id or request_id is required.
These IDs can be obtained either directly via the app_requested event,
or by the admin.apps.requests.list method.
Args:
app_id (str): The id of the app to approve. e.g. 'A12345'
request_id (str): The id of the request to approve. e.g. 'Ar12345'
Raises:
SlackRequestError: If neither or both the `app_id` and `request_id` args are specified.
"""
if app_id:
kwargs.update({"app_id": app_id})
elif request_id:
kwargs.update({"request_id": request_id})
else:
raise e.SlackRequestError(
"The app_id or request_id argument must be specified."
)
return self.api_call("admin.apps.approve", json=kwargs)
    def admin_apps_approved_list(self, **kwargs) -> SlackResponse:
        """List approved apps for an org or workspace.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.apps.approved.list` Web API method.
        """
        return self.api_call("admin.apps.approved.list", http_verb="GET", params=kwargs)
def admin_apps_clearResolution(self, *, app_id: str, **kwargs) -> SlackResponse:
"""Clear an app resolution
Args:
app_id (str): The id of the app whose resolution you want to clear/undo.
"""
kwargs.update({"app_id": app_id})
return self.api_call(
"admin.apps.clearResolution", http_verb="POST", params=kwargs
)
    def admin_apps_requests_list(self, **kwargs) -> SlackResponse:
        """List app requests for a team/workspace.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.apps.requests.list` Web API method.
        """
        return self.api_call("admin.apps.requests.list", http_verb="GET", params=kwargs)
    def admin_apps_restrict(self, **kwargs) -> SlackResponse:
        """Restrict an app for installation on a workspace.

        All keyword arguments are sent as the JSON body of a POST request to
        the `admin.apps.restrict` Web API method.
        """
        return self.api_call("admin.apps.restrict", json=kwargs)
    def admin_apps_restricted_list(self, **kwargs) -> SlackResponse:
        """List restricted apps for an org or workspace.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.apps.restricted.list` Web API method.
        """
        return self.api_call(
            "admin.apps.restricted.list", http_verb="GET", params=kwargs
        )
def admin_barriers_create(
self,
*,
barriered_from_usergroup_ids: Union[str, Sequence[str]],
primary_usergroup_id: str,
restricted_subjects: Union[str, Sequence[str]],
**kwargs
) -> SlackResponse:
"""Create an Information Barrier"""
kwargs.update({"primary_usergroup_id": primary_usergroup_id})
if isinstance(barriered_from_usergroup_ids, (list, Tuple)):
kwargs.update(
{"barriered_from_usergroup_ids": ",".join(barriered_from_usergroup_ids)}
)
else:
kwargs.update(
{"barriered_from_usergroup_ids": barriered_from_usergroup_ids}
)
if isinstance(restricted_subjects, (list, Tuple)):
kwargs.update({"restricted_subjects": ",".join(restricted_subjects)})
else:
kwargs.update({"restricted_subjects": restricted_subjects})
return self.api_call("admin.barriers.create", http_verb="POST", params=kwargs)
def admin_barriers_delete(self, *, barrier_id: str, **kwargs) -> SlackResponse:
"""Delete an existing Information Barrier"""
kwargs.update({"barrier_id": barrier_id})
return self.api_call("admin.barriers.delete", http_verb="POST", params=kwargs)
def admin_barriers_update(
self,
*,
barrier_id: str,
barriered_from_usergroup_ids: Union[str, Sequence[str]],
primary_usergroup_id: str,
restricted_subjects: Union[str, Sequence[str]],
**kwargs
) -> SlackResponse:
"""Update an existing Information Barrier"""
kwargs.update(
{"barrier_id": barrier_id, "primary_usergroup_id": primary_usergroup_id}
)
if isinstance(barriered_from_usergroup_ids, (list, Tuple)):
kwargs.update(
{"barriered_from_usergroup_ids": ",".join(barriered_from_usergroup_ids)}
)
else:
kwargs.update(
{"barriered_from_usergroup_ids": barriered_from_usergroup_ids}
)
if isinstance(restricted_subjects, (list, Tuple)):
kwargs.update({"restricted_subjects": ",".join(restricted_subjects)})
else:
kwargs.update({"restricted_subjects": restricted_subjects})
return self.api_call("admin.barriers.update", http_verb="POST", params=kwargs)
    def admin_barriers_list(self, **kwargs) -> SlackResponse:
        """Get all Information Barriers for your organization.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.barriers.list` Web API method.
        """
        return self.api_call("admin.barriers.list", http_verb="GET", params=kwargs)
def admin_conversations_create(
self, *, is_private: bool, name: str, **kwargs
) -> SlackResponse:
"""Create a public or private channel-based conversation.
Args:
is_private (bool): When true, creates a private channel instead of a public channel
name (str): Name of the public or private channel to create.
org_wide (bool): When true, the channel will be available org-wide.
Note: if the channel is not org_wide=true, you must specify a team_id for this channel
team_id (str): The workspace to create the channel in.
Note: this argument is required unless you set org_wide=true.
"""
kwargs.update({"is_private": is_private, "name": name})
return self.api_call("admin.conversations.create", json=kwargs)
def admin_conversations_delete(self, *, channel_id: str, **kwargs) -> SlackResponse:
"""Delete a public or private channel.
Args:
channel_id (str): The channel to delete.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.delete", json=kwargs)
def admin_conversations_invite(
self, *, channel_id: str, user_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
"""Invite a user to a public or private channel.
Args:
channel_id (str): The channel that the users will be invited to.
user_ids (str or list): The users to invite.
"""
kwargs.update({"channel_id": channel_id})
if isinstance(user_ids, (list, Tuple)):
kwargs.update({"user_ids": ",".join(user_ids)})
else:
kwargs.update({"user_ids": user_ids})
# NOTE: the endpoint is unable to handle Content-Type: application/json as of Sep 3, 2020.
return self.api_call("admin.conversations.invite", params=kwargs)
def admin_conversations_archive(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Archive a public or private channel.
Args:
channel_id (str): The channel to archive.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.archive", json=kwargs)
def admin_conversations_unarchive(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Unarchive a public or private channel.
Args:
channel_id (str): The channel to unarchive.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.unarchive", json=kwargs)
def admin_conversations_rename(
self, *, channel_id: str, name: str, **kwargs
) -> SlackResponse:
"""Rename a public or private channel.
Args:
channel_id (str): The channel to rename.
name (str): The name to rename the channel to.
"""
kwargs.update({"channel_id": channel_id, "name": name})
return self.api_call("admin.conversations.rename", json=kwargs)
    def admin_conversations_search(self, **kwargs) -> SlackResponse:
        """Search for public or private channels in an Enterprise organization.

        All keyword arguments are sent as the JSON body of a POST request to
        the `admin.conversations.search` Web API method.
        """
        return self.api_call("admin.conversations.search", json=kwargs)
def admin_conversations_convertToPrivate(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Convert a public channel to a private channel.
Args:
channel_id (str): The channel to convert to private.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.convertToPrivate", json=kwargs)
def admin_conversations_setConversationPrefs(
self, *, channel_id: str, prefs: Union[str, dict], **kwargs
) -> SlackResponse:
"""Set the posting permissions for a public or private channel.
Args:
channel_id (str): The channel to set the prefs for
prefs (str or dict): The prefs for this channel in a stringified JSON format.
"""
kwargs.update({"channel_id": channel_id, "prefs": prefs})
return self.api_call("admin.conversations.setConversationPrefs", json=kwargs)
def admin_conversations_getConversationPrefs(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Get conversation preferences for a public or private channel.
Args:
channel_id (str): The channel to get the preferences for.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.getConversationPrefs", json=kwargs)
def admin_conversations_disconnectShared(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Disconnect a connected channel from one or more workspaces.
Args:
channel_id (str): The channel to be disconnected from some workspaces.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.disconnectShared", json=kwargs)
    def admin_conversations_ekm_listOriginalConnectedChannelInfo(
        self, **kwargs
    ) -> SlackResponse:
        """List all disconnected channels—i.e.,
        channels that were once connected to other workspaces and then disconnected—and
        the corresponding original channel IDs for key revocation with EKM.

        All keyword arguments are forwarded as request parameters.
        """
        return self.api_call(
            "admin.conversations.ekm.listOriginalConnectedChannelInfo", params=kwargs
        )
def admin_conversations_restrictAccess_addGroup(
self, *, channel_id: str, group_id: str, **kwargs
) -> SlackResponse:
"""Add an allowlist of IDP groups for accessing a channel.
Args:
channel_id (str): The channel to link this group to. e.g. 'C1234567890'
group_id (str): The IDP Group ID to be an allowlist for the private channel. 'S0604QSJC'
team_id (str): The workspace where the channel exists.
This argument is required for channels only tied to one workspace,
and optional for channels that are shared across an organization.
e.g 'T1234'
"""
kwargs.update({"channel_id": channel_id, "group_id": group_id})
return self.api_call(
"admin.conversations.restrictAccess.addGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_listGroups(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""List all IDP Groups linked to a channel.
Args:
channel_id (str): The channel to link this group to. e.g. 'C1234567890'
team_id (str): The workspace where the channel exists.
This argument is required for channels only tied to one workspace,
and optional for channels that are shared across an organization.
e.g 'T1234'
"""
kwargs.update({"channel_id": channel_id})
return self.api_call(
"admin.conversations.restrictAccess.listGroups",
http_verb="GET",
params=kwargs,
)
def admin_conversations_restrictAccess_removeGroup(
self, *, channel_id: str, group_id: str, team_id: str, **kwargs
) -> SlackResponse:
"""Remove a linked IDP group linked from a private channel.
Args:
channel_id (str): The channel to link this group to. e.g. 'C1234567890'
group_id (str): The IDP Group ID to be an allowlist for the private channel. 'S0604QSJC'
team_id (str): The workspace where the channel exists.
This argument is required for channels only tied to one workspace,
and optional for channels that are shared across an organization.
e.g 'T1234'
"""
kwargs.update(
{"channel_id": channel_id, "group_id": group_id, "team_id": team_id}
)
return self.api_call(
"admin.conversations.restrictAccess.removeGroup",
http_verb="GET",
params=kwargs,
)
def admin_conversations_setTeams(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Set the workspaces in an Enterprise grid org that connect to a channel.
Args:
channel_id (str): The encoded channel_id to add or remove to workspaces.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.setTeams", json=kwargs)
def admin_conversations_getTeams(
self, *, channel_id: str, **kwargs
) -> SlackResponse:
"""Set the workspaces in an Enterprise grid org that connect to a channel.
Args:
channel_id (str): The channel to determine connected workspaces within the organization for.
"""
kwargs.update({"channel_id": channel_id})
return self.api_call("admin.conversations.getTeams", json=kwargs)
    def admin_emoji_add(self, **kwargs) -> SlackResponse:
        """Add an emoji.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.emoji.add` Web API method.
        """
        return self.api_call("admin.emoji.add", http_verb="GET", params=kwargs)
    def admin_emoji_addAlias(self, **kwargs) -> SlackResponse:
        """Add an emoji alias.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.emoji.addAlias` Web API method.
        """
        return self.api_call("admin.emoji.addAlias", http_verb="GET", params=kwargs)
    def admin_emoji_list(self, **kwargs) -> SlackResponse:
        """List emoji for an Enterprise Grid organization.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.emoji.list` Web API method.
        """
        return self.api_call("admin.emoji.list", http_verb="GET", params=kwargs)
    def admin_emoji_remove(self, **kwargs) -> SlackResponse:
        """Remove an emoji across an Enterprise Grid organization.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.emoji.remove` Web API method.
        """
        return self.api_call("admin.emoji.remove", http_verb="GET", params=kwargs)
    def admin_emoji_rename(self, **kwargs) -> SlackResponse:
        """Rename an emoji.

        All keyword arguments are forwarded as query-string parameters of a
        GET request to the `admin.emoji.rename` Web API method.
        """
        return self.api_call("admin.emoji.rename", http_verb="GET", params=kwargs)
def admin_users_session_reset(self, *, user_id: str, **kwargs) -> SlackResponse:
"""Wipes all valid sessions on all devices for a given user.
Args:
user_id (str): The ID of the user to wipe sessions for. e.g. 'W12345678'
"""
kwargs.update({"user_id": user_id})
return self.api_call("admin.users.session.reset", json=kwargs)
def admin_users_session_invalidate(
self, *, session_id: str, team_id: str, **kwargs
) -> SlackResponse:
"""Invalidate a single session for a user by session_id.
Args:
session_id (str): The ID of a session
team_id (str): ID of the team that the session belongs to
"""
kwargs.update({"session_id": session_id, "team_id": team_id})
return self.api_call("admin.users.session.invalidate", params=kwargs)
def admin_users_session_list(self, **kwargs) -> SlackResponse:
    """List every active user session for an organization."""
    return self.api_call("admin.users.session.list", params=kwargs)
def admin_teams_settings_setDefaultChannels(
    self, *, team_id: str, channel_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Set the default channels of a workspace.

    Args:
        team_id (str): ID of the team.
        channel_ids (str or list): A list of channel_ids.
            At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
    """
    kwargs.update({"team_id": team_id})
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(channel_ids, (list, tuple)):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call(
        "admin.teams.settings.setDefaultChannels", http_verb="GET", params=kwargs
    )
def admin_users_session_getSettings(
    self, *, user_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Get user-specific session settings—the session duration
    and what happens when the client closes—given a list of users.

    Args:
        user_ids (str or list): The IDs of users you'd like to fetch session settings for.
            Note: if a user does not have any active sessions, they will not be returned in the response.
    """
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(user_ids, (list, tuple)):
        kwargs.update({"user_ids": ",".join(user_ids)})
    else:
        kwargs.update({"user_ids": user_ids})
    return self.api_call("admin.users.session.getSettings", json=kwargs)
def admin_users_session_setSettings(
    self, *, user_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Configure the user-level session settings—the session duration
    and what happens when the client closes—for one or more users.

    Args:
        user_ids (str or list): The list of user IDs to apply the session settings for.
    """
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(user_ids, (list, tuple)):
        kwargs.update({"user_ids": ",".join(user_ids)})
    else:
        kwargs.update({"user_ids": user_ids})
    return self.api_call("admin.users.session.setSettings", json=kwargs)
def admin_users_session_clearSettings(
    self, *, user_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Clear user-specific session settings—the session duration
    and what happens when the client closes—for a list of users.

    Args:
        user_ids (str or list): The list of user IDs to clear the session settings for.
    """
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(user_ids, (list, tuple)):
        kwargs.update({"user_ids": ",".join(user_ids)})
    else:
        kwargs.update({"user_ids": user_ids})
    return self.api_call("admin.users.session.clearSettings", json=kwargs)
def admin_inviteRequests_approve(
    self, *, invite_request_id: str, **kwargs
) -> SlackResponse:
    """Approve a workspace invite request.

    team_id is required if your Enterprise Grid org contains more than one workspace.

    Args:
        invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
    """
    kwargs["invite_request_id"] = invite_request_id
    return self.api_call("admin.inviteRequests.approve", json=kwargs)
def admin_inviteRequests_approved_list(self, **kwargs) -> SlackResponse:
    """List every approved workspace invite request."""
    return self.api_call("admin.inviteRequests.approved.list", json=kwargs)
def admin_inviteRequests_denied_list(self, **kwargs) -> SlackResponse:
    """List every denied workspace invite request."""
    return self.api_call("admin.inviteRequests.denied.list", json=kwargs)
def admin_inviteRequests_deny(
    self, *, invite_request_id: str, **kwargs
) -> SlackResponse:
    """Deny a workspace invite request.

    Args:
        invite_request_id (str): ID of the request to invite. e.g. 'Ir1234'
    """
    kwargs["invite_request_id"] = invite_request_id
    return self.api_call("admin.inviteRequests.deny", json=kwargs)
def admin_inviteRequests_list(self, **kwargs) -> SlackResponse:
    """List every pending workspace invite request."""
    return self.api_call("admin.inviteRequests.list", json=kwargs)
def admin_teams_admins_list(self, *, team_id: str, **kwargs) -> SlackResponse:
    """List all of the admins on a given workspace.

    Args:
        team_id (str): ID of the team.
    """
    kwargs["team_id"] = team_id
    return self.api_call("admin.teams.admins.list", http_verb="GET", params=kwargs)
def admin_teams_create(
    self, *, team_domain: str, team_name: str, **kwargs
) -> SlackResponse:
    """Create an Enterprise team.

    Args:
        team_domain (str): Team domain. e.g. 'slacksoftballteam'
        team_name (str): Team name. e.g. 'Slack Softball Team'
    """
    kwargs["team_domain"] = team_domain
    kwargs["team_name"] = team_name
    return self.api_call("admin.teams.create", json=kwargs)
def admin_teams_list(self, **kwargs) -> SlackResponse:
    """List every team on an Enterprise organization."""
    return self.api_call("admin.teams.list", json=kwargs)
def admin_teams_owners_list(self, *, team_id: str, **kwargs) -> SlackResponse:
    """List all of the owners on a given workspace.

    Args:
        team_id (str): ID of the team.
    """
    # NOTE: the previous docstring said "admins" — copy-paste from
    # admin.teams.admins.list; this endpoint lists owners.
    kwargs.update({"team_id": team_id})
    return self.api_call("admin.teams.owners.list", http_verb="GET", params=kwargs)
def admin_teams_settings_info(self, team_id: str, **kwargs) -> SlackResponse:
    """Fetch information about settings in a workspace.

    Args:
        team_id (str): ID of the team.
    """
    kwargs["team_id"] = team_id
    return self.api_call("admin.teams.settings.info", json=kwargs)
def admin_teams_settings_setDescription(
    self, *, team_id: str, description: str, **kwargs
) -> SlackResponse:
    """Set the description of a given workspace.

    Args:
        team_id (str): ID of the team.
        description (str): Description of the team.
    """
    kwargs["team_id"] = team_id
    kwargs["description"] = description
    return self.api_call("admin.teams.settings.setDescription", json=kwargs)
def admin_teams_settings_setDiscoverability(
    self, *, team_id: str, discoverability: str, **kwargs
) -> SlackResponse:
    """Set the discoverability of a given workspace.

    Args:
        team_id (str): ID of the team.
        discoverability (str): This workspace's discovery setting.
            It must be set to one of open, invite_only, closed, or unlisted.
    """
    # NOTE: the previous docstring said "Sets the icon of a workspace." —
    # copy-paste from admin.teams.settings.setIcon.
    kwargs.update({"team_id": team_id, "discoverability": discoverability})
    return self.api_call("admin.teams.settings.setDiscoverability", json=kwargs)
def admin_teams_settings_setIcon(
    self, *, team_id: str, image_url: str, **kwargs
) -> SlackResponse:
    """Sets the icon of a workspace.

    Args:
        team_id (str): ID of the team.
        image_url (str): Url of the icon.
    """
    kwargs["team_id"] = team_id
    kwargs["image_url"] = image_url
    return self.api_call(
        "admin.teams.settings.setIcon", http_verb="GET", params=kwargs
    )
def admin_teams_settings_setName(
    self, *, team_id: str, name: str, **kwargs
) -> SlackResponse:
    """Set the name of a given workspace.

    Args:
        team_id (str): ID of the team.
        name (str): Name of the team.
    """
    # NOTE: the previous docstring said "Sets the icon of a workspace." —
    # copy-paste from admin.teams.settings.setIcon.
    kwargs.update({"team_id": team_id, "name": name})
    return self.api_call("admin.teams.settings.setName", json=kwargs)
def admin_usergroups_addChannels(
    self,
    *,
    team_id: str,
    usergroup_id: str,
    channel_ids: Union[str, Sequence[str]],
    **kwargs
) -> SlackResponse:
    """Add one or more default channels to an IDP group.

    Args:
        team_id (str): The workspace to add default channels in. e.g. 'T1234'
        usergroup_id (str): ID of the IDP group to add default channels for. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    kwargs.update({"team_id": team_id, "usergroup_id": usergroup_id})
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(channel_ids, (list, tuple)):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.usergroups.addChannels", json=kwargs)
def admin_usergroups_addTeams(
    self, *, usergroup_id: str, team_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Associate one or more default workspaces with an organization-wide IDP group.

    Args:
        usergroup_id (str): ID of the IDP group. e.g. 'S1234'
        team_ids (str or list): A comma separated list of encoded team (workspace) IDs.
            Each workspace MUST belong to the organization associated with the token.
            e.g. 'T12345678,T98765432' or ['T12345678', 'T98765432']
    """
    kwargs.update({"usergroup_id": usergroup_id})
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(team_ids, (list, tuple)):
        kwargs.update({"team_ids": ",".join(team_ids)})
    else:
        kwargs.update({"team_ids": team_ids})
    return self.api_call("admin.usergroups.addTeams", json=kwargs)
def admin_usergroups_listChannels(
    self, *, usergroup_id: str, **kwargs
) -> SlackResponse:
    """List the default channels of an IDP group.

    Args:
        usergroup_id (str): ID of the IDP group to list default channels for. e.g. 'S1234'
    """
    # NOTE: the previous docstring said "Add one or more default channels..." —
    # copy-paste from admin.usergroups.addChannels; this endpoint lists them.
    kwargs.update({"usergroup_id": usergroup_id})
    return self.api_call("admin.usergroups.listChannels", json=kwargs)
def admin_usergroups_removeChannels(
    self, *, usergroup_id: str, channel_ids: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Remove one or more default channels from an org-level IDP group.

    Args:
        usergroup_id (str): ID of the IDP group. e.g. 'S1234'
        channel_ids (str or list): Comma separated string of channel IDs. e.g. 'C123,C234' or ['C123', 'C234']
    """
    # NOTE: the previous docstring said "Add one or more default channels..." —
    # copy-paste from admin.usergroups.addChannels; this endpoint removes them.
    kwargs.update({"usergroup_id": usergroup_id})
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(channel_ids, (list, tuple)):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.usergroups.removeChannels", json=kwargs)
def admin_users_assign(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Add an Enterprise user to a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): ID of the user to add to the workspace.
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.assign", json=kwargs)
def admin_users_invite(
    self,
    *,
    team_id: str,
    email: str,
    channel_ids: Union[str, Sequence[str]],
    **kwargs
) -> SlackResponse:
    """Invite a user to a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        email (str): The email address of the person to invite. e.g. 'joe@email.com'
        channel_ids (str or list): A list of channel_ids for this user to join.
            At least one channel is required. e.g. ['C1A2B3C4D', 'C26Z25Y24']
    """
    kwargs.update({"team_id": team_id, "email": email})
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(channel_ids, (list, tuple)):
        kwargs.update({"channel_ids": ",".join(channel_ids)})
    else:
        kwargs.update({"channel_ids": channel_ids})
    return self.api_call("admin.users.invite", json=kwargs)
def admin_users_list(self, *, team_id: str, **kwargs) -> SlackResponse:
    """List the users on a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
    """
    kwargs["team_id"] = team_id
    return self.api_call("admin.users.list", json=kwargs)
def admin_users_remove(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Remove a user from a workspace.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to remove. e.g. 'W12345678'
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.remove", json=kwargs)
def admin_users_setAdmin(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an existing guest, regular user, or owner to be an admin user.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to promote to admin. e.g. 'W12345678'
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setAdmin", json=kwargs)
def admin_users_setExpiration(
    self, *, expiration_ts: int, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an expiration for a guest user.

    Args:
        expiration_ts (int): Timestamp when guest account should be disabled. e.g. '1234567890'
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to set an expiration for. e.g. 'W12345678'
    """
    kwargs["expiration_ts"] = expiration_ts
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setExpiration", json=kwargs)
def admin_users_setOwner(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an existing guest, regular user, or admin user to be a workspace owner.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to promote to owner. e.g. 'W12345678'
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setOwner", json=kwargs)
def admin_users_setRegular(
    self, *, team_id: str, user_id: str, **kwargs
) -> SlackResponse:
    """Set an existing guest user, admin user, or owner to be a regular user.

    Args:
        team_id (str): ID of the team. e.g. 'T1234'
        user_id (str): The ID of the user to demote to a regular user. e.g. 'W12345678'
    """
    kwargs["team_id"] = team_id
    kwargs["user_id"] = user_id
    return self.api_call("admin.users.setRegular", json=kwargs)
def api_test(self, **kwargs) -> SlackResponse:
    """Check the API calling code."""
    return self.api_call("api.test", json=kwargs)
def apps_connections_open(self, *, app_token: str, **kwargs) -> SlackResponse:
    """Obtain a fresh WSS URL for Socket Mode.

    Args:
        app_token (str): An app-level token (sent to the endpoint as `token`).
    """
    kwargs["token"] = app_token
    return self.api_call("apps.connections.open", http_verb="POST", params=kwargs)
def apps_event_authorizations_list(
    self, event_context: str, **kwargs
) -> SlackResponse:
    """Get a list of authorizations for the given event context.

    Each authorization represents an app installation that the event is visible to.

    Args:
        event_context (str): You'll receive an event_context identifying an event in each event payload sent to your app.
    """
    kwargs["event_context"] = event_context
    return self.api_call("apps.event.authorizations.list", params=kwargs)
def apps_uninstall(
    self, client_id: str, client_secret: str, **kwargs
) -> SlackResponse:
    """Uninstall your app from a workspace.

    Args:
        client_id (str): Issued when you created your application. e.g. '56579136444.26251006572'
        client_secret (str): Issued when you created your application. e.g. 'f25b5ceaf8a3c2a2c4f52bb4f0b0499e'
    """
    kwargs["client_id"] = client_id
    kwargs["client_secret"] = client_secret
    return self.api_call("apps.uninstall", params=kwargs)
def auth_revoke(self, **kwargs) -> SlackResponse:
    """Revoke a token."""
    return self.api_call("auth.revoke", http_verb="GET", params=kwargs)
def auth_test(self, **kwargs) -> SlackResponse:
    """Check authentication & identity."""
    return self.api_call("auth.test", json=kwargs)
def bots_info(self, **kwargs) -> SlackResponse:
    """Get information about a bot user."""
    return self.api_call("bots.info", http_verb="GET", params=kwargs)
def calls_add(
    self, *, external_unique_id: str, join_url: str, **kwargs
) -> SlackResponse:
    """Register a new Call.

    Args:
        external_unique_id (str): An ID supplied by the 3rd-party Call provider.
            It must be unique across all Calls from that service.
            e.g. '025169F6-E37A-4E62-BB54-7F93A0FC4C1F'
        join_url (str): The URL required for a client to join the Call.
            e.g. 'https://example.com/calls/1234567890'
    """
    kwargs["external_unique_id"] = external_unique_id
    kwargs["join_url"] = join_url
    # Serialize any "users" participant objects into the wire format.
    _update_call_participants(  # skipcq: PTC-W0039
        kwargs, kwargs.get("users", None)  # skipcq: PTC-W0039
    )  # skipcq: PTC-W0039
    return self.api_call("calls.add", http_verb="POST", params=kwargs)
def calls_end(self, *, id: str, **kwargs) -> SlackResponse:  # skipcq: PYL-W0622
    """End a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.end", http_verb="POST", params=kwargs)
def calls_info(self, *, id: str, **kwargs) -> SlackResponse:  # skipcq: PYL-W0622
    """Return information about a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.info", http_verb="POST", params=kwargs)
def calls_participants_add(
    self,
    *,
    id: str,  # skipcq: PYL-W0622
    users: Union[str, Sequence[Dict[str, str]]],
    **kwargs
) -> SlackResponse:
    """Register new participants added to a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
        users: (list): The list of users to add as participants in the Call.
    """
    kwargs["id"] = id
    # Serialize the participant objects into the wire format.
    _update_call_participants(kwargs, users)
    return self.api_call("calls.participants.add", http_verb="POST", params=kwargs)
def calls_participants_remove(
    self,
    *,
    id: str,  # skipcq: PYL-W0622
    users: Union[str, Sequence[Dict[str, str]]],
    **kwargs
) -> SlackResponse:
    """Register participants removed from a Call.

    Args:
        id (str): id returned when registering the call using the calls.add method.
        users: (list): The list of users to remove as participants in the Call.
    """
    kwargs["id"] = id
    # Serialize the participant objects into the wire format.
    _update_call_participants(kwargs, users)
    return self.api_call(
        "calls.participants.remove", http_verb="POST", params=kwargs
    )
def calls_update(self, *, id: str, **kwargs) -> SlackResponse:  # skipcq: PYL-W0622
    """Update information about a Call.

    Args:
        id (str): id returned by the calls.add method.
    """
    kwargs["id"] = id
    return self.api_call("calls.update", http_verb="POST", params=kwargs)
def channels_archive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Archive a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.archive", json=kwargs)
def channels_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Create a channel.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("channels.create", json=kwargs)
def channels_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch the history of messages and events from a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.history", http_verb="GET", params=kwargs)
def channels_info(self, *, channel: str, **kwargs) -> SlackResponse:
    """Get information about a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.info", http_verb="GET", params=kwargs)
def channels_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Invite a user to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("channels.invite", json=kwargs)
def channels_join(self, *, name: str, **kwargs) -> SlackResponse:
    """Join a channel, creating it if needed.

    Args:
        name (str): The channel name. e.g. '#general'
    """
    kwargs["name"] = name
    return self.api_call("channels.join", json=kwargs)
def channels_kick(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Remove a user from a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("channels.kick", json=kwargs)
def channels_leave(self, *, channel: str, **kwargs) -> SlackResponse:
    """Leave a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.leave", json=kwargs)
def channels_list(self, **kwargs) -> SlackResponse:
    """List every channel in a Slack team."""
    return self.api_call("channels.list", http_verb="GET", params=kwargs)
def channels_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("channels.mark", json=kwargs)
def channels_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
    """Rename a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("channels.rename", json=kwargs)
def channels_replies(
    self, *, channel: str, thread_ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a thread of messages posted to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("channels.replies", http_verb="GET", params=kwargs)
def channels_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> SlackResponse:
    """Set the purpose for a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("channels.setPurpose", json=kwargs)
def channels_setTopic(self, *, channel: str, topic: str, **kwargs) -> SlackResponse:
    """Set the topic for a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("channels.setTopic", json=kwargs)
def channels_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Unarchive a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("channels.unarchive", json=kwargs)
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Delete a message.

    Args:
        channel (str): Channel containing the message to be deleted. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("chat.delete", json=kwargs)
def chat_deleteScheduledMessage(
    self, *, channel: str, scheduled_message_id: str, **kwargs
) -> SlackResponse:
    """Delete a scheduled message.

    Args:
        channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
        scheduled_message_id (str): scheduled_message_id returned from call to chat.scheduleMessage e.g. 'Q1234ABCD'
    """
    kwargs["channel"] = channel
    kwargs["scheduled_message_id"] = scheduled_message_id
    return self.api_call("chat.deleteScheduledMessage", json=kwargs)
def chat_getPermalink(
    self, *, channel: str, message_ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a permalink URL for a specific extant message.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        message_ts (str): The timestamp. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["message_ts"] = message_ts
    return self.api_call("chat.getPermalink", http_verb="GET", params=kwargs)
def chat_meMessage(self, *, channel: str, text: str, **kwargs) -> SlackResponse:
    """Share a me message into a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        text (str): The message you'd like to share. e.g. 'Hello world'
    """
    kwargs["channel"] = channel
    kwargs["text"] = text
    return self.api_call("chat.meMessage", json=kwargs)
def chat_postEphemeral(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Send an ephemeral message to a user in a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The id of user who should see the message. e.g. 'U0BPQUNTA'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A list of either dict values or `slack_sdk.models.blocks.Block` objects.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    # Convert Block/Attachment model objects into plain dicts, then warn if
    # neither text nor a fallback is supplied.
    _parse_web_class_objects(kwargs)
    _warn_if_text_is_missing("chat.postEphemeral", kwargs)
    return self.api_call("chat.postEphemeral", json=kwargs)
def chat_postMessage(self, *, channel: str, **kwargs) -> SlackResponse:
    """Send a message to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A list of either dict values or `slack_sdk.models.blocks.Block` objects.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    # Convert Block/Attachment model objects into plain dicts, then warn if
    # neither text nor a fallback is supplied.
    _parse_web_class_objects(kwargs)
    _warn_if_text_is_missing("chat.postMessage", kwargs)
    return self.api_call("chat.postMessage", json=kwargs)
def chat_scheduleMessage(
    self, *, channel: str, post_at: str, text: str, **kwargs
) -> SlackResponse:
    """Schedule a message for later delivery.

    Args:
        channel (str): The channel the scheduled_message is posting to. e.g. 'C1234567890'
        post_at (str): Unix EPOCH timestamp of time in future to send the message. e.g. '299876400'
        text (str): The message you'd like to send. e.g. 'Hello world'
    """
    kwargs["channel"] = channel
    kwargs["post_at"] = post_at
    kwargs["text"] = text
    # Convert Block/Attachment model objects into plain dicts, then warn if
    # neither text nor a fallback is supplied.
    _parse_web_class_objects(kwargs)
    _warn_if_text_is_missing("chat.scheduleMessage", kwargs)
    return self.api_call("chat.scheduleMessage", json=kwargs)
def chat_unfurl(
    self, *, channel: str, ts: str, unfurls: dict, **kwargs
) -> SlackResponse:
    """Provide custom unfurl behavior for user-posted URLs.

    Args:
        channel (str): The Channel ID of the message. e.g. 'C1234567890'
        ts (str): Timestamp of the message to add unfurl behavior to. e.g. '1234567890.123456'
        unfurls (dict): a dict of the specific URLs you're offering an unfurl for.
            e.g. {"https://example.com/": {"text": "Every day is the test."}}
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    kwargs["unfurls"] = unfurls
    return self.api_call("chat.unfurl", json=kwargs)
def chat_update(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Update a message in a channel.

    Args:
        channel (str): The channel containing the message to be updated. e.g. 'C1234567890'
        ts (str): Timestamp of the message to be updated. e.g. '1234567890.123456'
        text (str): The message you'd like to share. e.g. 'Hello world'
            text is not required when presenting blocks.
        blocks (list): A list of either dict values or `slack_sdk.models.blocks.Block` objects.
            Blocks are required when not presenting text.
            e.g. [{"type": "section", "text": {"type": "plain_text", "text": "Hello world"}}]
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    # Convert Block/Attachment model objects into plain dicts, then warn if
    # neither text nor a fallback is supplied.
    _parse_web_class_objects(kwargs)
    _warn_if_text_is_missing("chat.update", kwargs)
    return self.api_call("chat.update", json=kwargs)
def chat_scheduledMessages_list(self, **kwargs) -> SlackResponse:
    """List every scheduled message."""
    return self.api_call("chat.scheduledMessages.list", json=kwargs)
def conversations_archive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Archive a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.archive", json=kwargs)
def conversations_close(self, *, channel: str, **kwargs) -> SlackResponse:
    """Close a direct message or multi-person direct message.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.close", json=kwargs)
def conversations_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Initiate a public or private channel-based conversation.

    Args:
        name (str): The name of the channel. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("conversations.create", json=kwargs)
def conversations_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch a conversation's history of messages and events.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.history", http_verb="GET", params=kwargs)
def conversations_info(self, *, channel: str, **kwargs) -> SlackResponse:
    """Retrieve information about a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.info", http_verb="GET", params=kwargs)
def conversations_invite(
    self, *, channel: str, users: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Invite users to a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        users (str or list): An list of user id's to invite. e.g. ['U2345678901', 'U3456789012']
    """
    kwargs.update({"channel": channel})
    # Use the builtin tuple type here: isinstance() against typing.Tuple is
    # deprecated since Python 3.9 (emits DeprecationWarning).
    if isinstance(users, (list, tuple)):
        kwargs.update({"users": ",".join(users)})
    else:
        kwargs.update({"users": users})
    return self.api_call("conversations.invite", json=kwargs)
def conversations_join(self, *, channel: str, **kwargs) -> SlackResponse:
    """Join an existing conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.join", json=kwargs)
def conversations_kick(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Remove a user from a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        user (str): The id of the user to kick. e.g. 'U2345678901'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("conversations.kick", json=kwargs)
def conversations_leave(self, *, channel: str, **kwargs) -> SlackResponse:
    """Leave a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.leave", json=kwargs)
def conversations_list(self, **kwargs) -> SlackResponse:
    """List every channel in a Slack team."""
    return self.api_call("conversations.list", http_verb="GET", params=kwargs)
def conversations_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a channel.

    Args:
        channel (str): Channel or conversation to set the read cursor for e.g. 'C1234567890'
        ts (str): Unique identifier of message to mark as most recently seen in the convo e.g. '1593473566.000200'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("conversations.mark", json=kwargs)
def conversations_members(self, *, channel: str, **kwargs) -> SlackResponse:
    """Retrieve the members of a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.members", http_verb="GET", params=kwargs)
def conversations_open(self, **kwargs) -> SlackResponse:
    """Open or resume a direct message or multi-person direct message."""
    return self.api_call("conversations.open", json=kwargs)
def conversations_rename(
    self, *, channel: str, name: str, **kwargs
) -> SlackResponse:
    """Rename a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("conversations.rename", json=kwargs)
def conversations_replies(
    self, *, channel: str, ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a thread of messages posted to a conversation.

    Args:
        channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890'
        ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("conversations.replies", http_verb="GET", params=kwargs)
def conversations_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> SlackResponse:
    """Set the purpose for a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("conversations.setPurpose", json=kwargs)
def conversations_setTopic(
    self, *, channel: str, topic: str, **kwargs
) -> SlackResponse:
    """Set the topic for a conversation.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("conversations.setTopic", json=kwargs)
def conversations_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Reverse conversation archival.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("conversations.unarchive", json=kwargs)
def dialog_open(self, *, dialog: dict, trigger_id: str, **kwargs) -> SlackResponse:
    """Open a dialog with a user.

    Args:
        dialog (dict): A dictionary of dialog arguments, e.g.
            {
                "callback_id": "46eh782b0",
                "title": "Request something",
                "submit_label": "Request",
                "state": "Max",
                "elements": [
                    {"type": "text", "label": "Origin", "name": "loc_origin"},
                    {"type": "text", "label": "Destination", "name": "loc_destination"},
                ]
            }
        trigger_id (str): The trigger id of a recent message interaction.
            e.g. '12345.98765.abcd2358fdea'
    """
    kwargs["dialog"] = dialog
    kwargs["trigger_id"] = trigger_id
    return self.api_call("dialog.open", json=kwargs)
def dnd_endDnd(self, **kwargs) -> SlackResponse:
    """End the calling user's Do Not Disturb session immediately."""
    payload = kwargs
    return self.api_call("dnd.endDnd", json=payload)
def dnd_endSnooze(self, **kwargs) -> SlackResponse:
    """End the calling user's snooze mode immediately."""
    payload = kwargs
    return self.api_call("dnd.endSnooze", json=payload)
def dnd_info(self, **kwargs) -> SlackResponse:
    """Retrieve a user's current Do Not Disturb status."""
    return self.api_call("dnd.info", params=kwargs, http_verb="GET")
def dnd_setSnooze(self, *, num_minutes: int, **kwargs) -> SlackResponse:
    """Turn on Do Not Disturb mode for the current user, or change its duration.

    Args:
        num_minutes (int): The snooze duration. e.g. 60
    """
    kwargs["num_minutes"] = num_minutes
    return self.api_call("dnd.setSnooze", params=kwargs, http_verb="GET")
def dnd_teamInfo(self, users: Union[str, Sequence[str]], **kwargs) -> SlackResponse:
    """Retrieves the Do Not Disturb status for users on a team.

    Args:
        users (str or list): User IDs to fetch information e.g. 'U123,U234' or ["U123", "U234"]
    """
    # isinstance against the builtin tuple, not typing.Tuple: typing aliases
    # are deprecated as isinstance targets and may raise on newer Pythons.
    if isinstance(users, (list, tuple)):
        # The API expects a comma-separated string of IDs.
        kwargs.update({"users": ",".join(users)})
    else:
        kwargs.update({"users": users})
    return self.api_call("dnd.teamInfo", http_verb="GET", params=kwargs)
def emoji_list(self, **kwargs) -> SlackResponse:
    """List the custom emoji registered for a team."""
    return self.api_call("emoji.list", params=kwargs, http_verb="GET")
def files_comments_delete(
    self, *, file: str, id: str, **kwargs  # skipcq: PYL-W0622
) -> SlackResponse:
    """Delete an existing comment on a file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
        id (str): The file comment id. e.g. 'Fc1234567890'
    """
    kwargs["file"] = file
    kwargs["id"] = id
    return self.api_call("files.comments.delete", json=kwargs)
def files_delete(self, *, file: str, **kwargs) -> SlackResponse:
    """Delete a file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.delete", json=kwargs)
def files_info(self, *, file: str, **kwargs) -> SlackResponse:
    """Get information about a team file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.info", params=kwargs, http_verb="GET")
def files_list(self, **kwargs) -> SlackResponse:
    """List and filter team files."""
    return self.api_call("files.list", params=kwargs, http_verb="GET")
def files_remote_info(self, **kwargs) -> SlackResponse:
    """Retrieve information about a remote file added to Slack."""
    return self.api_call("files.remote.info", params=kwargs, http_verb="GET")
def files_remote_list(self, **kwargs) -> SlackResponse:
    """Retrieve a list of remote files added to Slack.

    Note: the original docstring was copy-pasted from files.remote.info;
    this endpoint lists remote files rather than describing a single one.
    """
    return self.api_call("files.remote.list", http_verb="GET", params=kwargs)
def files_remote_add(
    self, *, external_id: str, external_url: str, title: str, **kwargs
) -> SlackResponse:
    """Add a file from a remote service.

    Args:
        external_id (str): Creator defined GUID for the file. e.g. '123456'
        external_url (str): URL of the remote file. e.g. 'http://example.com/my_cloud_service_file/abc123'
        title (str): Title of the file being shared. e.g. 'Danger, High Voltage!'
    """
    kwargs["external_id"] = external_id
    kwargs["external_url"] = external_url
    kwargs["title"] = title
    # preview_image (file): Preview of the document via multipart/form-data.
    files = None
    if "preview_image" in kwargs:
        files = {"preview_image": kwargs.pop("preview_image")}
    # Intentionally using "POST" method over "GET" here
    return self.api_call(
        "files.remote.add",
        http_verb="POST",
        data=kwargs,
        files=files,
    )
def files_remote_update(self, **kwargs) -> SlackResponse:
    """Update an existing remote file."""
    return self.api_call("files.remote.update", params=kwargs, http_verb="GET")
def files_remote_remove(self, **kwargs) -> SlackResponse:
    """Remove a remote file."""
    return self.api_call("files.remote.remove", params=kwargs, http_verb="GET")
def files_remote_share(
    self, *, channels: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Share a remote file into a channel.

    Args:
        channels (str or list): Comma-separated list of channel IDs where the file will be shared.
            e.g. ['C1234567890', 'C2345678901']
    """
    # isinstance against the builtin tuple, not typing.Tuple: typing aliases
    # are deprecated as isinstance targets and may raise on newer Pythons.
    if isinstance(channels, (list, tuple)):
        # The API expects a comma-separated string of channel IDs.
        kwargs.update({"channels": ",".join(channels)})
    else:
        kwargs.update({"channels": channels})
    return self.api_call("files.remote.share", http_verb="GET", params=kwargs)
def files_revokePublicURL(self, *, file: str, **kwargs) -> SlackResponse:
    """Revoke public/external sharing access for a file.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.revokePublicURL", json=kwargs)
def files_sharedPublicURL(self, *, file: str, **kwargs) -> SlackResponse:
    """Enable a file for public/external sharing.

    Args:
        file (str): The file id. e.g. 'F1234467890'
    """
    kwargs["file"] = file
    return self.api_call("files.sharedPublicURL", json=kwargs)
def files_upload(
    self, *, file: Union[str, bytes, IOBase] = None, content: str = None, **kwargs
) -> SlackResponse:
    """Uploads or creates a file.

    Args:
        file (str): Supply a file path.
            when you'd like to upload a specific file. e.g. 'dramacat.gif'
        content (str): Supply content when you'd like to create an
            editable text file containing the specified text. e.g. 'launch plan'
    Raises:
        SlackRequestError: If neither or both the `file` and `content` args are specified.
    """
    if file is None and content is None:
        raise e.SlackRequestError("The file or content argument must be specified.")
    if file is not None and content is not None:
        raise e.SlackRequestError(
            "You cannot specify both the file and the content argument."
        )
    if file:
        if "filename" not in kwargs and isinstance(file, str):
            # Use the local filename if filename is missing.
            # os.path.basename handles both '/' and '\\' separators,
            # unlike splitting on os.path.sep alone.
            kwargs["filename"] = os.path.basename(file)
        return self.api_call("files.upload", files={"file": file}, data=kwargs)
    data = kwargs.copy()
    data.update({"content": content})
    return self.api_call("files.upload", data=data)
def groups_archive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Archive a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.archive", json=kwargs)
def groups_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Create a private channel.

    Args:
        name (str): The name of the private group. e.g. 'mychannel'
    """
    kwargs["name"] = name
    return self.api_call("groups.create", json=kwargs)
def groups_createChild(self, *, channel: str, **kwargs) -> SlackResponse:
    """Clone and archive a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.createChild", params=kwargs, http_verb="GET")
def groups_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch history of messages and events from a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.history", params=kwargs, http_verb="GET")
def groups_info(self, *, channel: str, **kwargs) -> SlackResponse:
    """Get information about a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.info", params=kwargs, http_verb="GET")
def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Invite a user to a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("groups.invite", json=kwargs)
def groups_kick(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
    """Remove a user from a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
        user (str): The user id. e.g. 'U1234567890'
    """
    kwargs["channel"] = channel
    kwargs["user"] = user
    return self.api_call("groups.kick", json=kwargs)
def groups_leave(self, *, channel: str, **kwargs) -> SlackResponse:
    """Leave a private channel.

    Args:
        channel (str): The group id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.leave", json=kwargs)
def groups_list(self, **kwargs) -> SlackResponse:
    """List private channels that the calling user has access to."""
    return self.api_call("groups.list", params=kwargs, http_verb="GET")
def groups_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a private channel.

    Args:
        channel (str): Private channel to set reading cursor in. e.g. 'C1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("groups.mark", json=kwargs)
def groups_open(self, *, channel: str, **kwargs) -> SlackResponse:
    """Open a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.open", json=kwargs)
def groups_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
    """Rename a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    kwargs["channel"] = channel
    kwargs["name"] = name
    return self.api_call("groups.rename", json=kwargs)
def groups_replies(
    self, *, channel: str, thread_ts: str, **kwargs
) -> SlackResponse:
    """Retrieve a thread of messages posted to a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("groups.replies", params=kwargs, http_verb="GET")
def groups_setPurpose(
    self, *, channel: str, purpose: str, **kwargs
) -> SlackResponse:
    """Set the purpose for a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
        purpose (str): The new purpose for the channel. e.g. 'My Purpose'
    """
    kwargs["channel"] = channel
    kwargs["purpose"] = purpose
    return self.api_call("groups.setPurpose", json=kwargs)
def groups_setTopic(self, *, channel: str, topic: str, **kwargs) -> SlackResponse:
    """Set the topic for a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
        topic (str): The new topic for the channel. e.g. 'My Topic'
    """
    kwargs["channel"] = channel
    kwargs["topic"] = topic
    return self.api_call("groups.setTopic", json=kwargs)
def groups_unarchive(self, *, channel: str, **kwargs) -> SlackResponse:
    """Unarchive a private channel.

    Args:
        channel (str): The channel id. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("groups.unarchive", json=kwargs)
def im_close(self, *, channel: str, **kwargs) -> SlackResponse:
    """Close a direct message channel.

    Args:
        channel (str): Direct message channel to close. e.g. 'D1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("im.close", json=kwargs)
def im_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch history of messages and events from a direct message channel.

    Args:
        channel (str): Direct message channel to fetch history from. e.g. 'D1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("im.history", params=kwargs, http_verb="GET")
def im_list(self, **kwargs) -> SlackResponse:
    """List direct message channels for the calling user."""
    return self.api_call("im.list", params=kwargs, http_verb="GET")
def im_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a direct message channel.

    Args:
        channel (str): Direct message channel to set reading cursor in. e.g. 'D1234567890'
        ts (str): Timestamp of the most recently seen message. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("im.mark", json=kwargs)
def im_open(self, *, user: str, **kwargs) -> SlackResponse:
    """Open a direct message channel.

    Args:
        user (str): The user id to open a DM with. e.g. 'W1234567890'
    """
    kwargs["user"] = user
    return self.api_call("im.open", json=kwargs)
def im_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a direct message conversation.

    Args:
        channel (str): Direct message channel to fetch thread from. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more replies.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("im.replies", params=kwargs, http_verb="GET")
def migration_exchange(
    self, *, users: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """For Enterprise Grid workspaces, map local user IDs to global user IDs

    Args:
        users (str or list): A list of user ids, up to 400 per request.
            e.g. ['W1234567890', 'U2345678901', 'U3456789012']
    """
    # isinstance against the builtin tuple, not typing.Tuple: typing aliases
    # are deprecated as isinstance targets and may raise on newer Pythons.
    if isinstance(users, (list, tuple)):
        # The API expects a comma-separated string of user IDs.
        kwargs.update({"users": ",".join(users)})
    else:
        kwargs.update({"users": users})
    return self.api_call("migration.exchange", http_verb="GET", params=kwargs)
def mpim_close(self, *, channel: str, **kwargs) -> SlackResponse:
    """Close a multiparty direct message channel.

    Args:
        channel (str): Multiparty Direct message channel to close. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("mpim.close", json=kwargs)
def mpim_history(self, *, channel: str, **kwargs) -> SlackResponse:
    """Fetch history of messages and events from a multiparty direct message.

    Args:
        channel (str): Multiparty direct message to fetch history for. e.g. 'G1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("mpim.history", params=kwargs, http_verb="GET")
def mpim_list(self, **kwargs) -> SlackResponse:
    """List multiparty direct message channels for the calling user."""
    return self.api_call("mpim.list", params=kwargs, http_verb="GET")
def mpim_mark(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Set the read cursor in a multiparty direct message channel.

    Args:
        channel (str): Multiparty direct message channel to set reading cursor in.
            e.g. 'G1234567890'
        ts (str): Timestamp of the most recently seen message.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["ts"] = ts
    return self.api_call("mpim.mark", json=kwargs)
def mpim_open(self, *, users: Union[str, Sequence[str]], **kwargs) -> SlackResponse:
    """This method opens a multiparty direct message.

    Args:
        users (str or list): A lists of user ids. The ordering of the users
            is preserved whenever a MPIM group is returned.
            e.g. ['W1234567890', 'U2345678901', 'U3456789012']
    """
    # isinstance against the builtin tuple, not typing.Tuple: typing aliases
    # are deprecated as isinstance targets and may raise on newer Pythons.
    if isinstance(users, (list, tuple)):
        # The API expects a comma-separated string of user IDs.
        kwargs.update({"users": ",".join(users)})
    else:
        kwargs.update({"users": users})
    return self.api_call("mpim.open", json=kwargs)
def mpim_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a multiparty direct message.

    Args:
        channel (str): Multiparty direct message channel to fetch thread from.
            e.g. 'G1234567890'
        thread_ts (str): Unique identifier of a thread's parent message.
            e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("mpim.replies", params=kwargs, http_verb="GET")
def oauth_v2_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> SlackResponse:
    """Exchange a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
        client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
        code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
        redirect_uri (optional str): Must match the originally submitted URI
            (if one was sent). e.g. 'https://example.com'
    """
    kwargs["code"] = code
    if redirect_uri is not None:
        kwargs["redirect_uri"] = redirect_uri
    # client_id/client_secret are passed via HTTP Basic auth, not form data.
    return self.api_call(
        "oauth.v2.access",
        data=kwargs,
        auth={"client_id": client_id, "client_secret": client_secret},
    )
def oauth_access(
    self,
    *,
    client_id: str,
    client_secret: str,
    code: str,
    redirect_uri: Optional[str] = None,
    **kwargs
) -> SlackResponse:
    """Exchange a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application. e.g. '4b39e9-752c4'
        client_secret (str): Issued when you created your application. e.g. '33fea0113f5b1'
        code (str): The code param returned via the OAuth callback. e.g. 'ccdaa72ad'
        redirect_uri (optional str): Must match the originally submitted URI
            (if one was sent). e.g. 'https://example.com'
    """
    kwargs["code"] = code
    if redirect_uri is not None:
        kwargs["redirect_uri"] = redirect_uri
    # client_id/client_secret are passed via HTTP Basic auth, not form data.
    return self.api_call(
        "oauth.access",
        data=kwargs,
        auth={"client_id": client_id, "client_secret": client_secret},
    )
def pins_add(self, *, channel: str, **kwargs) -> SlackResponse:
    """Pin an item to a channel.

    Args:
        channel (str): Channel to pin the item in. e.g. 'C1234567890'
        file (str): File id to pin. e.g. 'F1234567890'
        file_comment (str): File comment to pin. e.g. 'Fc1234567890'
        timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    return self.api_call("pins.add", json=kwargs)
def pins_list(self, *, channel: str, **kwargs) -> SlackResponse:
    """List items pinned to a channel.

    Args:
        channel (str): Channel to get pinned items for. e.g. 'C1234567890'
    """
    kwargs["channel"] = channel
    return self.api_call("pins.list", params=kwargs, http_verb="GET")
def pins_remove(self, *, channel: str, **kwargs) -> SlackResponse:
    """Un-pin an item from a channel.

    Args:
        channel (str): Channel to pin the item in. e.g. 'C1234567890'
        file (str): File id to pin. e.g. 'F1234567890'
        file_comment (str): File comment to pin. e.g. 'Fc1234567890'
        timestamp (str): Timestamp of message to pin. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    return self.api_call("pins.remove", json=kwargs)
def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:
    """Add a reaction to an item.

    Args:
        name (str): Reaction (emoji) name. e.g. 'thumbsup'
        channel (str): Channel where the message to add reaction to was posted.
            e.g. 'C1234567890'
        timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
    """
    kwargs["name"] = name
    return self.api_call("reactions.add", json=kwargs)
def reactions_get(self, **kwargs) -> SlackResponse:
    """Get reactions for an item."""
    return self.api_call("reactions.get", params=kwargs, http_verb="GET")
def reactions_list(self, **kwargs) -> SlackResponse:
    """List reactions made by a user."""
    return self.api_call("reactions.list", params=kwargs, http_verb="GET")
def reactions_remove(self, *, name: str, **kwargs) -> SlackResponse:
    """Remove a reaction from an item.

    Args:
        name (str): Reaction (emoji) name. e.g. 'thumbsup'
    """
    kwargs["name"] = name
    return self.api_call("reactions.remove", json=kwargs)
def reminders_add(self, *, text: str, time: str, **kwargs) -> SlackResponse:
    """Create a reminder.

    Args:
        text (str): The content of the reminder. e.g. 'eat a banana'
        time (str): When this reminder should happen:
            the Unix timestamp (up to five years from now e.g. '1602288000'),
            the number of seconds until the reminder (if within 24 hours),
            or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
    """
    kwargs["text"] = text
    kwargs["time"] = time
    return self.api_call("reminders.add", json=kwargs)
def reminders_complete(self, *, reminder: str, **kwargs) -> SlackResponse:
    """Mark a reminder as complete.

    Args:
        reminder (str): The ID of the reminder to be marked as complete.
            e.g. 'Rm12345678'
    """
    kwargs["reminder"] = reminder
    return self.api_call("reminders.complete", json=kwargs)
def reminders_delete(self, *, reminder: str, **kwargs) -> SlackResponse:
    """Delete a reminder.

    Args:
        reminder (str): The ID of the reminder. e.g. 'Rm12345678'
    """
    kwargs["reminder"] = reminder
    return self.api_call("reminders.delete", json=kwargs)
def reminders_info(self, *, reminder: str, **kwargs) -> SlackResponse:
    """Get information about a reminder.

    Args:
        reminder (str): The ID of the reminder. e.g. 'Rm12345678'
    """
    kwargs["reminder"] = reminder
    return self.api_call("reminders.info", params=kwargs, http_verb="GET")
def reminders_list(self, **kwargs) -> SlackResponse:
    """List all reminders created by or for a given user."""
    return self.api_call("reminders.list", params=kwargs, http_verb="GET")
def rtm_connect(self, **kwargs) -> SlackResponse:
    """Start a Real Time Messaging session."""
    return self.api_call("rtm.connect", params=kwargs, http_verb="GET")
def rtm_start(self, **kwargs) -> SlackResponse:
    """Start a Real Time Messaging session (with full workspace state)."""
    return self.api_call("rtm.start", params=kwargs, http_verb="GET")
def search_all(self, *, query: str, **kwargs) -> SlackResponse:
    """Search for messages and files matching a query.

    Args:
        query (str): Search query. May contain booleans, etc.
            e.g. 'pickleface'
    """
    kwargs["query"] = query
    return self.api_call("search.all", params=kwargs, http_verb="GET")
def search_files(self, *, query: str, **kwargs) -> SlackResponse:
    """Search for files matching a query.

    Args:
        query (str): Search query. May contain booleans, etc.
            e.g. 'pickleface'
    """
    kwargs["query"] = query
    return self.api_call("search.files", params=kwargs, http_verb="GET")
def search_messages(self, *, query: str, **kwargs) -> SlackResponse:
    """Search for messages matching a query.

    Args:
        query (str): Search query. May contain booleans, etc.
            e.g. 'pickleface'
    """
    kwargs["query"] = query
    return self.api_call("search.messages", params=kwargs, http_verb="GET")
def stars_add(self, **kwargs) -> SlackResponse:
    """Add a star to an item.

    Args:
        channel (str): Channel to add star to, or channel where the message to add
            star to was posted (used with timestamp). e.g. 'C1234567890'
        file (str): File to add star to. e.g. 'F1234567890'
        file_comment (str): File comment to add star to. e.g. 'Fc1234567890'
        timestamp (str): Timestamp of the message to add star to. e.g. '1234567890.123456'
    """
    payload = kwargs
    return self.api_call("stars.add", json=payload)
def stars_list(self, **kwargs) -> SlackResponse:
    """List stars for a user."""
    return self.api_call("stars.list", params=kwargs, http_verb="GET")
def stars_remove(self, **kwargs) -> SlackResponse:
    """Remove a star from an item.

    Args:
        channel (str): Channel to remove star from, or channel where
            the message to remove star from was posted (used with timestamp). e.g. 'C1234567890'
        file (str): File to remove star from. e.g. 'F1234567890'
        file_comment (str): File comment to remove star from. e.g. 'Fc1234567890'
        timestamp (str): Timestamp of the message to remove star from. e.g. '1234567890.123456'
    """
    payload = kwargs
    return self.api_call("stars.remove", json=payload)
def team_accessLogs(self, **kwargs) -> SlackResponse:
    """Get the access logs for the current team."""
    return self.api_call("team.accessLogs", params=kwargs, http_verb="GET")
def team_billableInfo(self, **kwargs) -> SlackResponse:
    """Get billable users information for the current team."""
    return self.api_call("team.billableInfo", params=kwargs, http_verb="GET")
def team_info(self, **kwargs) -> SlackResponse:
    """Get information about the current team."""
    return self.api_call("team.info", params=kwargs, http_verb="GET")
def team_integrationLogs(self, **kwargs) -> SlackResponse:
    """Get the integration logs for the current team."""
    return self.api_call("team.integrationLogs", params=kwargs, http_verb="GET")
def team_profile_get(self, **kwargs) -> SlackResponse:
    """Retrieve a team's profile."""
    return self.api_call("team.profile.get", params=kwargs, http_verb="GET")
def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:
    """Create a User Group.

    Args:
        name (str): A name for the User Group. Must be unique among User Groups.
            e.g. 'My Test Team'
    """
    kwargs["name"] = name
    return self.api_call("usergroups.create", json=kwargs)
def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """Disable an existing User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to disable.
            e.g. 'S0604QSJC'
    """
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.disable", json=kwargs)
def usergroups_enable(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """Enable a User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to enable.
            e.g. 'S0604QSJC'
    """
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.enable", json=kwargs)
def usergroups_list(self, **kwargs) -> SlackResponse:
    """List all User Groups for a team."""
    return self.api_call("usergroups.list", params=kwargs, http_verb="GET")
def usergroups_update(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """Update an existing User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to update.
            e.g. 'S0604QSJC'
    """
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.update", json=kwargs)
def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """List all users in a User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to list users for.
            e.g. 'S0604QSJC'
    """
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.users.list", params=kwargs, http_verb="GET")
def usergroups_users_update(
    self, *, usergroup: str, users: Union[str, Sequence[str]], **kwargs
) -> SlackResponse:
    """Update the list of users for a User Group

    Args:
        usergroup (str): The encoded ID of the User Group to update.
            e.g. 'S0604QSJC'
        users (str or list): A list user IDs that represent the entire list of
            users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
    """
    kwargs.update({"usergroup": usergroup})
    # isinstance against the builtin tuple, not typing.Tuple: typing aliases
    # are deprecated as isinstance targets and may raise on newer Pythons.
    if isinstance(users, (list, tuple)):
        # The API expects a comma-separated string of user IDs.
        kwargs.update({"users": ",".join(users)})
    else:
        kwargs.update({"users": users})
    return self.api_call("usergroups.users.update", json=kwargs)
def users_conversations(self, **kwargs) -> SlackResponse:
    """List conversations the calling user may access."""
    return self.api_call("users.conversations", params=kwargs, http_verb="GET")
def users_deletePhoto(self, **kwargs) -> SlackResponse:
    """Delete the user profile photo."""
    return self.api_call("users.deletePhoto", params=kwargs, http_verb="GET")
def users_getPresence(self, *, user: str, **kwargs) -> SlackResponse:
    """Get user presence information.

    Args:
        user (str): User to get presence info on. Defaults to the authed user.
            e.g. 'W1234567890'
    """
    kwargs["user"] = user
    return self.api_call("users.getPresence", params=kwargs, http_verb="GET")
def users_identity(self, **kwargs) -> SlackResponse:
    """Get a user's identity."""
    return self.api_call("users.identity", params=kwargs, http_verb="GET")
def users_info(self, *, user: str, **kwargs) -> SlackResponse:
    """Get information about a user.

    Args:
        user (str): User to get info on.
            e.g. 'W1234567890'
    """
    kwargs["user"] = user
    return self.api_call("users.info", params=kwargs, http_verb="GET")
def users_list(self, **kwargs) -> SlackResponse:
    """List all users in a Slack team."""
    return self.api_call("users.list", params=kwargs, http_verb="GET")
def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:
    """Find a user with an email address.

    Args:
        email (str): An email address belonging to a user in the workspace.
            e.g. 'spengler@ghostbusters.example.com'
    """
    kwargs["email"] = email
    return self.api_call("users.lookupByEmail", params=kwargs, http_verb="GET")
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:
    """Set the user profile photo.

    Args:
        image (str): Supply the path of the image you'd like to upload.
            e.g. 'myimage.png'
    """
    # The image is binary, so it is uploaded via multipart/form-data.
    return self.api_call("users.setPhoto", files={"image": image}, data=kwargs)
def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:
    """Manually set user presence.

    Args:
        presence (str): Either 'auto' or 'away'.
    """
    kwargs["presence"] = presence
    return self.api_call("users.setPresence", json=kwargs)
def users_profile_get(self, **kwargs) -> SlackResponse:
    """Retrieve a user's profile information."""
    return self.api_call("users.profile.get", params=kwargs, http_verb="GET")
def users_profile_set(self, **kwargs) -> SlackResponse:
    """Set the profile information for a user."""
    payload = kwargs
    return self.api_call("users.profile.set", json=payload)
def views_open(
    self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> SlackResponse:
    """Open a view for a user.

    See https://api.slack.com/block-kit/surfaces/modals for details.

    Args:
        trigger_id (str): Exchange a trigger to post to the user.
            e.g. '12345.98765.abcd2358fdea'
        view (dict or View): The view payload.
    """
    kwargs["trigger_id"] = trigger_id
    # Accept both a plain dict and a View model instance.
    kwargs["view"] = view.to_dict() if isinstance(view, View) else view
    return self.api_call("views.open", json=kwargs)
def views_push(
    self, *, trigger_id: str, view: Union[dict, View], **kwargs
) -> SlackResponse:
    """Push a view onto the stack of a root view.

    Push a new view onto the existing view stack by passing a view
    payload and a valid trigger_id generated from an interaction
    within the existing modal.
    Read the modals documentation (https://api.slack.com/block-kit/surfaces/modals)
    to learn more about the lifecycle and intricacies of views.

    Args:
        trigger_id (str): Exchange a trigger to post to the user.
            e.g. '12345.98765.abcd2358fdea'
        view (dict or View): The view payload.
    """
    # Only set trigger_id here; "view" is assigned once below after the
    # View-vs-dict check (the original redundantly inserted the raw view
    # first, inconsistently with views_open/views_publish).
    kwargs.update({"trigger_id": trigger_id})
    if isinstance(view, View):
        kwargs.update({"view": view.to_dict()})
    else:
        kwargs.update({"view": view})
    return self.api_call("views.push", json=kwargs)
def views_update(
    self,
    *,
    view: Union[dict, View],
    external_id: str = None,
    view_id: str = None,
    **kwargs
) -> SlackResponse:
    """Update an existing view.

    Update a view by passing a new view definition along with the
    view_id returned in views.open or the external_id.
    See the modals documentation (https://api.slack.com/block-kit/surfaces/modals#updating_views)
    to learn more about updating views and avoiding race conditions with the hash argument.

    Args:
        view (dict or View): The view payload.
        external_id (str): A unique identifier of the view set by the developer.
            e.g. 'bmarley_view2'
        view_id (str): A unique identifier of the view to be updated.
            e.g. 'VMM512F2U'
    Raises:
        SlackRequestError: Either view_id or external_id is required.
    """
    # Accept both a plain dict and a View model instance.
    kwargs["view"] = view.to_dict() if isinstance(view, View) else view
    # external_id takes precedence over view_id; one of them is mandatory.
    if external_id:
        kwargs["external_id"] = external_id
    elif view_id:
        kwargs["view_id"] = view_id
    else:
        raise e.SlackRequestError("Either view_id or external_id is required.")
    return self.api_call("views.update", json=kwargs)
def views_publish(
    self, *, user_id: str, view: Union[dict, View], **kwargs
) -> SlackResponse:
    """Publish a static view for a User.

    Create or update the view that comprises an
    app's Home tab (https://api.slack.com/surfaces/tabs)
    for a specific user.

    Args:
        user_id (str): id of the user you want publish a view to.
            e.g. 'U0BPQUNTA'
        view (dict or View): The view payload.
    """
    kwargs["user_id"] = user_id
    # Accept both a plain dict and a View model instance.
    kwargs["view"] = view.to_dict() if isinstance(view, View) else view
    return self.api_call("views.publish", json=kwargs)
def workflows_stepCompleted(
self, *, workflow_step_execute_id: str, outputs: dict = None, **kwargs
) -> SlackResponse:
"""Indicate a successful outcome of a workflow step's execution.
Args:
workflow_step_execute_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
outputs (dict): A key-value object of outputs from your step.
e.g. { 'task_name': 'Task Name' }
"""
kwargs.update({"workflow_step_execute_id": workflow_step_execute_id})
if outputs:
kwargs.update({"outputs": outputs})
return self.api_call("workflows.stepCompleted", json=kwargs)
def workflows_stepFailed(
self, *, workflow_step_execute_id: str, error: dict, **kwargs
) -> SlackResponse:
"""Indicate an unsuccessful outcome of a workflow step's execution.
Args:
workflow_step_execute_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
error (dict): A dict with a message property that contains a human readable error message
e.g. { message: 'Step failed to execute.' }
"""
kwargs.update(
{"workflow_step_execute_id": workflow_step_execute_id, "error": error}
)
return self.api_call("workflows.stepFailed", json=kwargs)
def workflows_updateStep(
self,
*,
workflow_step_edit_id: str,
inputs: dict = None,
outputs: list = None,
**kwargs
) -> SlackResponse:
"""Update the configuration for a workflow extension step.
Args:
workflow_step_edit_id (str): A unique identifier of the workflow step to be updated.
e.g. 'add_task'
inputs (dict): A key-value object of inputs required from a user during step configuration.
e.g. { 'title': { 'value': 'The Title' }, 'submitter': { 'value': 'The Submitter' } }
outputs (list): A list of output objects used during step execution.
e.g. [{ 'type': 'text', 'name': 'title', 'label': 'Title' }]
"""
kwargs.update({"workflow_step_edit_id": workflow_step_edit_id})
if inputs:
kwargs.update({"inputs": inputs})
if outputs:
kwargs.update({"outputs": outputs})
return self.api_call("workflows.updateStep", json=kwargs)
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/web/client.py",
"copies": "1",
"size": "99296",
"license": "mit",
"hash": -4404737304909984300,
"line_mean": 39.6718557968,
"line_max": 125,
"alpha_frac": 0.593422643,
"autogenerated": false,
"ratio": 3.917607134401389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5011029777401389,
"avg_score": null,
"num_lines": null
} |
"""A Python module for interacting with Slack's Web API."""
import asyncio
import copy
import hashlib
import hmac
import io
import json
import logging
import mimetypes
import urllib
import uuid
import warnings
from http.client import HTTPResponse
from ssl import SSLContext
from typing import BinaryIO, Dict, List
from typing import Optional, Union
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen, OpenerDirector, ProxyHandler, HTTPSHandler
import aiohttp
from aiohttp import FormData, BasicAuth
import slack.errors as err
from slack.errors import SlackRequestError
from slack.web import convert_bool_to_0_or_1, get_user_agent
from slack.web.async_internal_utils import (
_get_event_loop,
_build_req_args,
_get_url,
_files_to_data,
_request_with_session,
)
from slack.web.deprecation import show_2020_01_deprecation
from slack.web.slack_response import SlackResponse
class BaseClient:
    """Shared machinery for the Web API client: builds requests, sends them
    over aiohttp (async path) or urllib (sync path), and wraps responses in
    SlackResponse objects."""
    # Root URL that every Web API method name is appended to.
    BASE_URL = "https://www.slack.com/api/"
    def __init__(
        self,
        token: Optional[str] = None,
        base_url: str = BASE_URL,
        timeout: int = 30,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        ssl: Optional[SSLContext] = None,
        proxy: Optional[str] = None,
        run_async: bool = False,
        use_sync_aiohttp: bool = False,
        session: Optional[aiohttp.ClientSession] = None,
        headers: Optional[dict] = None,
        user_agent_prefix: Optional[str] = None,
        user_agent_suffix: Optional[str] = None,
    ):
        # Keep None as None; otherwise strip surrounding whitespace (tokens
        # pasted from the Slack dashboard often carry stray whitespace).
        self.token = None if token is None else token.strip()
        self.base_url = base_url
        self.timeout = timeout  # seconds, applied per HTTP request
        self.ssl = ssl
        self.proxy = proxy
        self.run_async = run_async
        self.use_sync_aiohttp = use_sync_aiohttp
        self.session = session
        self.headers = headers or {}
        self.headers["User-Agent"] = get_user_agent(
            user_agent_prefix, user_agent_suffix
        )
        self._logger = logging.getLogger(__name__)
        # Created lazily in api_call when the aiohttp path is used.
        self._event_loop = loop
    def api_call(  # skipcq: PYL-R1710
        self,
        api_method: str,
        *,
        http_verb: str = "POST",
        files: dict = None,
        data: Union[dict, FormData] = None,
        params: dict = None,
        json: dict = None,  # skipcq: PYL-W0621
        headers: dict = None,
        auth: dict = None,
    ) -> Union[asyncio.Future, SlackResponse]:
        """Create a request and execute the API call to Slack.
        Args:
            api_method (str): The target Slack API method.
                e.g. 'chat.postMessage'
            http_verb (str): HTTP Verb. e.g. 'POST'
            files (dict): Files to multipart upload.
                e.g. {image OR file: file_object OR file_path}
            data: The body to attach to the request. If a dictionary is
                provided, form-encoding will take place.
                e.g. {'key1': 'value1', 'key2': 'value2'}
            params (dict): The URL parameters to append to the URL.
                e.g. {'key1': 'value1', 'key2': 'value2'}
            json (dict): JSON for the body to attach to the request
                (if files or data is not specified).
                e.g. {'key1': 'value1', 'key2': 'value2'}
            headers (dict): Additional request headers
            auth (dict): A dictionary that consists of client_id and client_secret
        Returns:
            (SlackResponse)
                The server's response to an HTTP request. Data
                from the response can be accessed like a dict.
                If the response included 'next_cursor' it can
                be iterated on to execute subsequent requests.
        Raises:
            SlackApiError: The following Slack API call failed:
                'chat.postMessage'.
            SlackRequestError: Json data can only be submitted as
                POST requests.
        """
        api_url = _get_url(self.base_url, api_method)
        headers = headers or {}
        headers.update(self.headers)
        req_args = _build_req_args(
            token=self.token,
            http_verb=http_verb,
            files=files,
            data=data,
            params=params,
            json=json,  # skipcq: PYL-W0621
            headers=headers,
            auth=auth,
            ssl=self.ssl,
            proxy=self.proxy,
        )
        show_2020_01_deprecation(api_method)
        if self.run_async or self.use_sync_aiohttp:
            # aiohttp path: schedule the coroutine on the event loop,
            # creating a loop lazily on first use.
            if self._event_loop is None:
                self._event_loop = _get_event_loop()
            future = asyncio.ensure_future(
                self._send(http_verb=http_verb, api_url=api_url, req_args=req_args),
                loop=self._event_loop,
            )
            if self.run_async:
                return future
            if self.use_sync_aiohttp:
                # Using this is no longer recommended - just keep this for backward-compatibility
                return self._event_loop.run_until_complete(future)
        else:
            return self._sync_send(api_url=api_url, req_args=req_args)
    # =================================================================
    # aiohttp based async WebClient
    # =================================================================
    async def _send(
        self, http_verb: str, api_url: str, req_args: dict
    ) -> SlackResponse:
        """Sends the request out for transmission.
        Args:
            http_verb (str): The HTTP verb. e.g. 'GET' or 'POST'.
            api_url (str): The Slack API url. e.g. 'https://slack.com/api/chat.postMessage'
            req_args (dict): The request arguments to be attached to the request.
            e.g.
            {
                json: {
                    'attachments': [{"pretext": "pre-hello", "text": "text-world"}],
                    'channel': '#random'
                }
            }
        Returns:
            The response parsed into a SlackResponse object.
        """
        open_files = _files_to_data(req_args)
        try:
            if "params" in req_args:
                # True/False -> "1"/"0"
                req_args["params"] = convert_bool_to_0_or_1(req_args["params"])
            res = await self._request(
                http_verb=http_verb, api_url=api_url, req_args=req_args
            )
        finally:
            # Close any files opened by _files_to_data even when the request failed.
            for f in open_files:
                f.close()
        data = {
            "client": self,
            "http_verb": http_verb,
            "api_url": api_url,
            "req_args": req_args,
            "use_sync_aiohttp": self.use_sync_aiohttp,
        }
        return SlackResponse(**{**data, **res}).validate()
    async def _request(self, *, http_verb, api_url, req_args) -> Dict[str, any]:
        """Submit the HTTP request with the running session or a new session.
        Returns:
            A dictionary of the response data.
        """
        return await _request_with_session(
            current_session=self.session,
            timeout=self.timeout,
            logger=self._logger,
            http_verb=http_verb,
            api_url=api_url,
            req_args=req_args,
        )
    # =================================================================
    # urllib based WebClient
    # =================================================================
    def _sync_send(self, api_url, req_args) -> SlackResponse:
        """Unpacks req_args and dispatches to the urllib-based implementation."""
        params = req_args["params"] if "params" in req_args else None
        data = req_args["data"] if "data" in req_args else None
        files = req_args["files"] if "files" in req_args else None
        _json = req_args["json"] if "json" in req_args else None
        headers = req_args["headers"] if "headers" in req_args else None
        token = params.get("token") if params and "token" in params else None
        auth = (
            req_args["auth"] if "auth" in req_args else None
        )  # Basic Auth for oauth.v2.access / oauth.access
        if auth is not None:
            if isinstance(auth, BasicAuth):
                headers["Authorization"] = auth.encode()
            elif isinstance(auth, str):
                headers["Authorization"] = auth
            else:
                self._logger.warning(
                    f"As the auth: {auth}: {type(auth)} is unsupported, skipped"
                )
        # params and data are merged into a single form-encoded body.
        body_params = {}
        if params:
            body_params.update(params)
        if data:
            body_params.update(data)
        return self._urllib_api_call(
            token=token,
            url=api_url,
            query_params={},
            body_params=body_params,
            files=files,
            json_body=_json,
            additional_headers=headers,
        )
    def _request_for_pagination(self, api_url, req_args) -> Dict[str, any]:
        """This method is supposed to be used only for SlackResponse pagination
        You can paginate using Python's for iterator as below:
        for response in client.conversations_list(limit=100):
            # do something with each response here
        """
        response = self._perform_urllib_http_request(url=api_url, args=req_args)
        return {
            "status_code": int(response["status"]),
            "headers": dict(response["headers"]),
            "data": json.loads(response["body"]),
        }
    def _urllib_api_call(
        self,
        *,
        token: str = None,
        url: str,
        query_params: Dict[str, str] = {},
        json_body: Dict = {},
        body_params: Dict[str, str] = {},
        files: Dict[str, io.BytesIO] = {},
        additional_headers: Dict[str, str] = {},
    ) -> SlackResponse:
        """Performs a Slack API request via urllib and returns the validated result.
        Args:
            token: Slack API token (bot or user token); falls back to self.token.
            url: Complete URL (e.g., https://www.slack.com/api/chat.postMessage)
            query_params: Query string parameters appended to the URL.
            json_body: JSON body (still a dict at this point).
            body_params: Form body params.
            files: Files to upload (path strings, bytes, or file-like objects).
            additional_headers: Request headers to append.
        Returns:
            A validated SlackResponse.
        """
        files_to_close: List[BinaryIO] = []
        try:
            # True/False -> "1"/"0"
            query_params = convert_bool_to_0_or_1(query_params)
            body_params = convert_bool_to_0_or_1(body_params)
            if self._logger.level <= logging.DEBUG:
                def convert_params(values: dict) -> dict:
                    if not values or not isinstance(values, dict):
                        return {}
                    return {
                        k: ("(bytes)" if isinstance(v, bytes) else v)
                        for k, v in values.items()
                    }
                # Redact the Authorization header so tokens never reach the logs.
                headers = {
                    k: "(redacted)" if k.lower() == "authorization" else v
                    for k, v in additional_headers.items()
                }
                self._logger.debug(
                    f"Sending a request - url: {url}, "
                    f"query_params: {convert_params(query_params)}, "
                    f"body_params: {convert_params(body_params)}, "
                    f"files: {convert_params(files)}, "
                    f"json_body: {json_body}, "
                    f"headers: {headers}"
                )
            request_data = {}
            if files is not None and isinstance(files, dict) and len(files) > 0:
                if body_params:
                    for k, v in body_params.items():
                        request_data.update({k: v})
                for k, v in files.items():
                    if isinstance(v, str):
                        # A str value is treated as a file path and opened here;
                        # it is closed in the finally block below.
                        f: BinaryIO = open(v.encode("utf-8", "ignore"), "rb")
                        files_to_close.append(f)
                        request_data.update({k: f})
                    elif isinstance(v, (bytearray, bytes)):
                        request_data.update({k: io.BytesIO(v)})
                    else:
                        request_data.update({k: v})
            request_headers = self._build_urllib_request_headers(
                token=token or self.token,
                # NOTE(review): `json` here is the imported stdlib module (the
                # parameter is `json_body`) and `files` defaults to {}, so both
                # flags are always True; presumably harmless because
                # _perform_urllib_http_request re-derives Content-Type from the
                # actual body — confirm before changing.
                has_json=json is not None,
                has_files=files is not None,
                additional_headers=additional_headers,
            )
            request_args = {
                "headers": request_headers,
                "data": request_data,
                "params": body_params,
                "files": files,
                "json": json_body,
            }
            if query_params:
                q = urlencode(query_params)
                url = f"{url}&{q}" if "?" in url else f"{url}?{q}"
            response = self._perform_urllib_http_request(url=url, args=request_args)
            if response.get("body"):
                try:
                    response_body_data: dict = json.loads(response["body"])
                except json.decoder.JSONDecodeError as e:
                    message = f"Failed to parse the response body: {str(e)}"
                    raise err.SlackApiError(message, response)
            else:
                response_body_data: dict = None
            if query_params:
                all_params = copy.copy(body_params)
                all_params.update(query_params)
            else:
                all_params = body_params
            request_args["params"] = all_params  # for backward-compatibility
            return SlackResponse(
                client=self,
                http_verb="POST",  # you can use POST method for all the Web APIs
                api_url=url,
                req_args=request_args,
                data=response_body_data,
                headers=dict(response["headers"]),
                status_code=response["status"],
                use_sync_aiohttp=False,
            ).validate()
        finally:
            for f in files_to_close:
                if not f.closed:
                    f.close()
    def _perform_urllib_http_request(
        self, *, url: str, args: Dict[str, Dict[str, any]]
    ) -> Dict[str, any]:
        """Performs an HTTP request via urllib and parses the response.
        Args:
            url: Complete URL (e.g., https://www.slack.com/api/chat.postMessage)
            args: dict with "headers", "data", "params", and "json" keys
        Returns:
            dict of {"status": int, "headers": message headers, "body": str}
        """
        headers = args["headers"]
        if args["json"]:
            body = json.dumps(args["json"])
            headers["Content-Type"] = "application/json;charset=utf-8"
        elif args["data"]:
            # Hand-rolled multipart/form-data encoding for file uploads.
            boundary = f"--------------{uuid.uuid4()}"
            sep_boundary = b"\r\n--" + boundary.encode("ascii")
            end_boundary = sep_boundary + b"--\r\n"
            body = io.BytesIO()
            data = args["data"]
            for key, value in data.items():
                readable = getattr(value, "readable", None)
                if readable and value.readable():
                    filename = "Uploaded file"
                    name_attr = getattr(value, "name", None)
                    if name_attr:
                        filename = (
                            name_attr.decode("utf-8")
                            if isinstance(name_attr, bytes)
                            else name_attr
                        )
                    if "filename" in data:
                        filename = data["filename"]
                    mimetype = (
                        mimetypes.guess_type(filename)[0] or "application/octet-stream"
                    )
                    title = (
                        f'\r\nContent-Disposition: form-data; name="{key}"; filename="(unknown)"\r\n'
                        + f"Content-Type: {mimetype}\r\n"
                    )
                    value = value.read()
                else:
                    title = f'\r\nContent-Disposition: form-data; name="{key}"\r\n'
                    value = str(value).encode("utf-8")
                body.write(sep_boundary)
                body.write(title.encode("utf-8"))
                body.write(b"\r\n")
                body.write(value)
            body.write(end_boundary)
            body = body.getvalue()
            headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
            headers["Content-Length"] = len(body)
        elif args["params"]:
            body = urlencode(args["params"])
            headers["Content-Type"] = "application/x-www-form-urlencoded"
        else:
            body = None
        if isinstance(body, str):
            body = body.encode("utf-8")
        # NOTE: Intentionally ignore the `http_verb` here
        # Slack APIs accepts any API method requests with POST methods
        try:
            # urllib not only opens http:// or https:// URLs, but also ftp:// and file://.
            # With this it might be possible to open local files on the executing machine
            # which might be a security risk if the URL to open can be manipulated by an external user.
            # (BAN-B310)
            if url.lower().startswith("http"):
                req = Request(method="POST", url=url, data=body, headers=headers)
                opener: Optional[OpenerDirector] = None
                if self.proxy is not None:
                    if isinstance(self.proxy, str):
                        opener = urllib.request.build_opener(
                            ProxyHandler({"http": self.proxy, "https": self.proxy}),
                            HTTPSHandler(context=self.ssl),
                        )
                    else:
                        raise SlackRequestError(
                            f"Invalid proxy detected: {self.proxy} must be a str value"
                        )
                # NOTE: BAN-B310 is already checked above
                resp: Optional[HTTPResponse] = None
                if opener:
                    resp = opener.open(req, timeout=self.timeout)  # skipcq: BAN-B310
                else:
                    resp = urlopen(  # skipcq: BAN-B310
                        req, context=self.ssl, timeout=self.timeout
                    )
                charset = resp.headers.get_content_charset() or "utf-8"
                body: str = resp.read().decode(charset)  # read the response body here
                return {"status": resp.code, "headers": resp.headers, "body": body}
            raise SlackRequestError(f"Invalid URL detected: {url}")
        except HTTPError as e:
            # HTTP errors still carry a readable body; surface it to the caller.
            resp = {"status": e.code, "headers": e.headers}
            if e.code == 429:
                # for compatibility with aiohttp
                resp["headers"]["Retry-After"] = resp["headers"]["retry-after"]
            charset = e.headers.get_content_charset() or "utf-8"
            body: str = e.read().decode(charset)  # read the response body here
            resp["body"] = body
            return resp
        except Exception as err:
            self._logger.error(f"Failed to send a request to Slack API server: {err}")
            raise err
    def _build_urllib_request_headers(
        self, token: str, has_json: bool, has_files: bool, additional_headers: dict
    ) -> Dict[str, str]:
        """Builds request headers. Content-Type resolution order: form-encoded
        default -> instance headers -> additional headers -> JSON override ->
        removed entirely for multipart uploads (set later from the body)."""
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        headers.update(self.headers)
        if token:
            headers.update({"Authorization": "Bearer {}".format(token)})
        if additional_headers:
            headers.update(additional_headers)
        if has_json:
            headers.update({"Content-Type": "application/json;charset=utf-8"})
        if has_files:
            # will be set afterwards
            headers.pop("Content-Type", None)
        return headers
    # =================================================================
    @staticmethod
    def validate_slack_signature(
        *, signing_secret: str, data: str, timestamp: str, signature: str
    ) -> bool:
        """
        Slack creates a unique string for your app and shares it with you. Verify
        requests from Slack with confidence by verifying signatures using your
        signing secret.
        On each HTTP request that Slack sends, we add an X-Slack-Signature HTTP
        header. The signature is created by combining the signing secret with the
        body of the request we're sending using a standard HMAC-SHA256 keyed hash.
        https://api.slack.com/docs/verifying-requests-from-slack#how_to_make_a_request_signature_in_4_easy_steps__an_overview
        Args:
            signing_secret: Your application's signing secret, available in the
                Slack API dashboard
            data: The raw body of the incoming request - no headers, just the body.
            timestamp: from the 'X-Slack-Request-Timestamp' header
            signature: from the 'X-Slack-Signature' header - the calculated signature
                should match this.
        Returns:
            True if signatures matches
        """
        warnings.warn(
            "As this method is deprecated since slackclient 2.6.0, "
            "use `from slack.signature import SignatureVerifier` instead",
            DeprecationWarning,
        )
        format_req = str.encode(f"v0:{timestamp}:{data}")
        encoded_secret = str.encode(signing_secret)
        request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()
        calculated_signature = f"v0={request_hash}"
        # compare_digest performs a constant-time comparison (timing-attack safe).
        return hmac.compare_digest(calculated_signature, signature)
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack/web/base_client.py",
"copies": "1",
"size": "20791",
"license": "mit",
"hash": 4768471504430178000,
"line_mean": 38.4516129032,
"line_max": 125,
"alpha_frac": 0.5207541725,
"autogenerated": false,
"ratio": 4.319758986079369,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007505207989224229,
"num_lines": 527
} |
"""A Python module for interacting with Slack's Web API."""
import copy
import hashlib
import hmac
import io
import json
import logging
import mimetypes
import urllib
import uuid
import warnings
from base64 import b64encode
from http.client import HTTPResponse
from ssl import SSLContext
from typing import BinaryIO, Dict, List
from typing import Optional, Union
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen, OpenerDirector, ProxyHandler, HTTPSHandler
import slack_sdk.errors as err
from slack_sdk.errors import SlackRequestError
from .deprecation import show_2020_01_deprecation
from .internal_utils import (
convert_bool_to_0_or_1,
get_user_agent,
_get_url,
_build_req_args,
_build_unexpected_body_error_message,
)
from .slack_response import SlackResponse
from ..proxy_env_variable_loader import load_http_proxy_from_env
class BaseClient:
    """Synchronous (urllib-based) base client for Slack's Web API: builds
    requests, performs the HTTP calls, and wraps responses in SlackResponse."""
    # Root URL that every Web API method name is appended to.
    BASE_URL = "https://www.slack.com/api/"
    def __init__(
        self,
        token: Optional[str] = None,
        base_url: str = BASE_URL,
        timeout: int = 30,
        ssl: Optional[SSLContext] = None,
        proxy: Optional[str] = None,
        headers: Optional[dict] = None,
        user_agent_prefix: Optional[str] = None,
        user_agent_suffix: Optional[str] = None,
        # for Org-Wide App installation
        team_id: Optional[str] = None,
        logger: Optional[logging.Logger] = None,
    ):
        # Keep None as None; otherwise strip surrounding whitespace (tokens
        # pasted from the Slack dashboard often carry stray whitespace).
        self.token = None if token is None else token.strip()
        self.base_url = base_url
        self.timeout = timeout  # seconds, applied per HTTP request
        self.ssl = ssl
        self.proxy = proxy
        self.headers = headers or {}
        self.headers["User-Agent"] = get_user_agent(
            user_agent_prefix, user_agent_suffix
        )
        # Params merged into every request by _build_req_args (currently team_id).
        self.default_params = {}
        if team_id is not None:
            self.default_params["team_id"] = team_id
        self._logger = logger if logger is not None else logging.getLogger(__name__)
        # Fall back to proxy settings from environment variables when no
        # explicit proxy was given (empty/blank string counts as not given).
        if self.proxy is None or len(self.proxy.strip()) == 0:
            env_variable = load_http_proxy_from_env(self._logger)
            if env_variable is not None:
                self.proxy = env_variable
    def api_call(  # skipcq: PYL-R1710
        self,
        api_method: str,
        *,
        http_verb: str = "POST",
        files: dict = None,
        data: Union[dict] = None,
        params: dict = None,
        json: dict = None,  # skipcq: PYL-W0621
        headers: dict = None,
        auth: dict = None,
    ) -> SlackResponse:
        """Create a request and execute the API call to Slack.
        Args:
            api_method (str): The target Slack API method.
                e.g. 'chat.postMessage'
            http_verb (str): HTTP Verb. e.g. 'POST'
            files (dict): Files to multipart upload.
                e.g. {image OR file: file_object OR file_path}
            data: The body to attach to the request. If a dictionary is
                provided, form-encoding will take place.
                e.g. {'key1': 'value1', 'key2': 'value2'}
            params (dict): The URL parameters to append to the URL.
                e.g. {'key1': 'value1', 'key2': 'value2'}
            json (dict): JSON for the body to attach to the request
                (if files or data is not specified).
                e.g. {'key1': 'value1', 'key2': 'value2'}
            headers (dict): Additional request headers
            auth (dict): A dictionary that consists of client_id and client_secret
        Returns:
            (SlackResponse)
                The server's response to an HTTP request. Data
                from the response can be accessed like a dict.
                If the response included 'next_cursor' it can
                be iterated on to execute subsequent requests.
        Raises:
            SlackApiError: The following Slack API call failed:
                'chat.postMessage'.
            SlackRequestError: Json data can only be submitted as
                POST requests.
        """
        api_url = _get_url(self.base_url, api_method)
        headers = headers or {}
        headers.update(self.headers)
        req_args = _build_req_args(
            token=self.token,
            http_verb=http_verb,
            files=files,
            data=data,
            default_params=self.default_params,
            params=params,
            json=json,  # skipcq: PYL-W0621
            headers=headers,
            auth=auth,
            ssl=self.ssl,
            proxy=self.proxy,
        )
        show_2020_01_deprecation(api_method)
        return self._sync_send(api_url=api_url, req_args=req_args)
    # =================================================================
    # urllib based WebClient
    # =================================================================
    def _sync_send(self, api_url, req_args) -> SlackResponse:
        """Unpacks req_args and dispatches to the urllib-based implementation."""
        params = req_args["params"] if "params" in req_args else None
        data = req_args["data"] if "data" in req_args else None
        files = req_args["files"] if "files" in req_args else None
        _json = req_args["json"] if "json" in req_args else None
        headers = req_args["headers"] if "headers" in req_args else None
        token = params.get("token") if params and "token" in params else None
        auth = (
            req_args["auth"] if "auth" in req_args else None
        )  # Basic Auth for oauth.v2.access / oauth.access
        if auth is not None:
            if isinstance(auth, str):
                headers["Authorization"] = auth
            elif isinstance(auth, dict):
                # Build an HTTP Basic Auth header from client_id/client_secret.
                client_id, client_secret = auth["client_id"], auth["client_secret"]
                value = b64encode(
                    f"{client_id}:{client_secret}".encode("utf-8")
                ).decode("ascii")
                headers["Authorization"] = f"Basic {value}"
            else:
                self._logger.warning(
                    f"As the auth: {auth}: {type(auth)} is unsupported, skipped"
                )
        # params and data are merged into a single form-encoded body.
        body_params = {}
        if params:
            body_params.update(params)
        if data:
            body_params.update(data)
        return self._urllib_api_call(
            token=token,
            url=api_url,
            query_params={},
            body_params=body_params,
            files=files,
            json_body=_json,
            additional_headers=headers,
        )
    def _request_for_pagination(self, api_url, req_args) -> Dict[str, any]:
        """This method is supposed to be used only for SlackResponse pagination
        You can paginate using Python's for iterator as below:
        for response in client.conversations_list(limit=100):
            # do something with each response here
        """
        response = self._perform_urllib_http_request(url=api_url, args=req_args)
        return {
            "status_code": int(response["status"]),
            "headers": dict(response["headers"]),
            "data": json.loads(response["body"]),
        }
    def _urllib_api_call(
        self,
        *,
        token: str = None,
        url: str,
        query_params: Dict[str, str] = {},
        json_body: Dict = {},
        body_params: Dict[str, str] = {},
        files: Dict[str, io.BytesIO] = {},
        additional_headers: Dict[str, str] = {},
    ) -> SlackResponse:
        """Performs a Slack API request and returns the result.
        Args:
            token: Slack API Token (either bot token or user token)
            url: Complete URL (e.g., https://www.slack.com/api/chat.postMessage)
            query_params: Query string
            json_body: JSON data structure (it's still a dict at this point),
                if you give this argument, body_params and files will be skipped
            body_params: Form body params
            files: Files to upload
            additional_headers: Request headers to append
        Returns:
            API response
        """
        files_to_close: List[BinaryIO] = []
        try:
            # True/False -> "1"/"0"
            query_params = convert_bool_to_0_or_1(query_params)
            body_params = convert_bool_to_0_or_1(body_params)
            if self._logger.level <= logging.DEBUG:
                def convert_params(values: dict) -> dict:
                    if not values or not isinstance(values, dict):
                        return {}
                    return {
                        k: ("(bytes)" if isinstance(v, bytes) else v)
                        for k, v in values.items()
                    }
                # Redact the Authorization header so tokens never reach the logs.
                headers = {
                    k: "(redacted)" if k.lower() == "authorization" else v
                    for k, v in additional_headers.items()
                }
                self._logger.debug(
                    f"Sending a request - url: {url}, "
                    f"query_params: {convert_params(query_params)}, "
                    f"body_params: {convert_params(body_params)}, "
                    f"files: {convert_params(files)}, "
                    f"json_body: {json_body}, "
                    f"headers: {headers}"
                )
            request_data = {}
            if files is not None and isinstance(files, dict) and len(files) > 0:
                if body_params:
                    for k, v in body_params.items():
                        request_data.update({k: v})
                for k, v in files.items():
                    if isinstance(v, str):
                        # A str value is treated as a file path and opened here;
                        # it is closed in the finally block below.
                        f: BinaryIO = open(v.encode("utf-8", "ignore"), "rb")
                        files_to_close.append(f)
                        request_data.update({k: f})
                    elif isinstance(v, (bytearray, bytes)):
                        request_data.update({k: io.BytesIO(v)})
                    else:
                        request_data.update({k: v})
            request_headers = self._build_urllib_request_headers(
                token=token or self.token,
                # NOTE(review): `json` here is the imported stdlib module (the
                # parameter is `json_body`) and `files` defaults to {}, so both
                # flags are always True; presumably harmless because
                # _perform_urllib_http_request re-derives Content-Type from the
                # actual body — confirm before changing.
                has_json=json is not None,
                has_files=files is not None,
                additional_headers=additional_headers,
            )
            request_args = {
                "headers": request_headers,
                "data": request_data,
                "params": body_params,
                "files": files,
                "json": json_body,
            }
            if query_params:
                q = urlencode(query_params)
                url = f"{url}&{q}" if "?" in url else f"{url}?{q}"
            response = self._perform_urllib_http_request(url=url, args=request_args)
            # A bytes body (e.g. gzip from admin.analytics.getFile) is passed
            # through untouched; str bodies are parsed as JSON.
            body = response.get("body", None)  # skipcq: PTC-W0039
            response_body_data: Optional[Union[dict, bytes]] = body
            if body is not None and not isinstance(body, bytes):
                try:
                    response_body_data = json.loads(response["body"])
                except json.decoder.JSONDecodeError:
                    message = _build_unexpected_body_error_message(
                        response.get("body", "")
                    )
                    raise err.SlackApiError(message, response)
            if query_params:
                all_params = copy.copy(body_params)
                all_params.update(query_params)
            else:
                all_params = body_params
            request_args["params"] = all_params  # for backward-compatibility
            return SlackResponse(
                client=self,
                http_verb="POST",  # you can use POST method for all the Web APIs
                api_url=url,
                req_args=request_args,
                data=response_body_data,
                headers=dict(response["headers"]),
                status_code=response["status"],
            ).validate()
        finally:
            for f in files_to_close:
                if not f.closed:
                    f.close()
    def _perform_urllib_http_request(
        self, *, url: str, args: Dict[str, Dict[str, any]]
    ) -> Dict[str, any]:
        """Performs an HTTP request and parses the response.
        Args:
            url: Complete URL (e.g., https://www.slack.com/api/chat.postMessage)
            args: args has "headers", "data", "params", and "json"
                "headers": Dict[str, str]
                "data": Dict[str, any]
                "params": Dict[str, str],
                "json": Dict[str, any],
        Returns:
            dict {status: int, headers: Headers, body: str}
        """
        headers = args["headers"]
        if args["json"]:
            body = json.dumps(args["json"])
            headers["Content-Type"] = "application/json;charset=utf-8"
        elif args["data"]:
            # Hand-rolled multipart/form-data encoding for file uploads.
            boundary = f"--------------{uuid.uuid4()}"
            sep_boundary = b"\r\n--" + boundary.encode("ascii")
            end_boundary = sep_boundary + b"--\r\n"
            body = io.BytesIO()
            data = args["data"]
            for key, value in data.items():
                readable = getattr(value, "readable", None)
                if readable and value.readable():
                    filename = "Uploaded file"
                    name_attr = getattr(value, "name", None)
                    if name_attr:
                        filename = (
                            name_attr.decode("utf-8")
                            if isinstance(name_attr, bytes)
                            else name_attr
                        )
                    if "filename" in data:
                        filename = data["filename"]
                    mimetype = (
                        mimetypes.guess_type(filename)[0] or "application/octet-stream"
                    )
                    title = (
                        f'\r\nContent-Disposition: form-data; name="{key}"; filename="(unknown)"\r\n'
                        + f"Content-Type: {mimetype}\r\n"
                    )
                    value = value.read()
                else:
                    title = f'\r\nContent-Disposition: form-data; name="{key}"\r\n'
                    value = str(value).encode("utf-8")
                body.write(sep_boundary)
                body.write(title.encode("utf-8"))
                body.write(b"\r\n")
                body.write(value)
            body.write(end_boundary)
            body = body.getvalue()
            headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
            headers["Content-Length"] = len(body)
        elif args["params"]:
            body = urlencode(args["params"])
            headers["Content-Type"] = "application/x-www-form-urlencoded"
        else:
            body = None
        if isinstance(body, str):
            body = body.encode("utf-8")
        # NOTE: Intentionally ignore the `http_verb` here
        # Slack APIs accepts any API method requests with POST methods
        try:
            # urllib not only opens http:// or https:// URLs, but also ftp:// and file://.
            # With this it might be possible to open local files on the executing machine
            # which might be a security risk if the URL to open can be manipulated by an external user.
            # (BAN-B310)
            if url.lower().startswith("http"):
                req = Request(method="POST", url=url, data=body, headers=headers)
                opener: Optional[OpenerDirector] = None
                if self.proxy is not None:
                    if isinstance(self.proxy, str):
                        opener = urllib.request.build_opener(
                            ProxyHandler({"http": self.proxy, "https": self.proxy}),
                            HTTPSHandler(context=self.ssl),
                        )
                    else:
                        raise SlackRequestError(
                            f"Invalid proxy detected: {self.proxy} must be a str value"
                        )
                # NOTE: BAN-B310 is already checked above
                resp: Optional[HTTPResponse] = None
                if opener:
                    resp = opener.open(req, timeout=self.timeout)  # skipcq: BAN-B310
                else:
                    resp = urlopen(  # skipcq: BAN-B310
                        req, context=self.ssl, timeout=self.timeout
                    )
                if resp.headers.get_content_type() == "application/gzip":
                    # admin.analytics.getFile
                    body: bytes = resp.read()
                    return {"status": resp.code, "headers": resp.headers, "body": body}
                charset = resp.headers.get_content_charset() or "utf-8"
                body: str = resp.read().decode(charset)  # read the response body here
                return {"status": resp.code, "headers": resp.headers, "body": body}
            raise SlackRequestError(f"Invalid URL detected: {url}")
        except HTTPError as e:
            # HTTP errors still carry a readable body; surface it to the caller.
            resp = {"status": e.code, "headers": e.headers}
            if e.code == 429:
                # for compatibility with aiohttp
                resp["headers"]["Retry-After"] = resp["headers"]["retry-after"]
            # read the response body here
            charset = e.headers.get_content_charset() or "utf-8"
            body: str = e.read().decode(charset)
            resp["body"] = body
            return resp
        except Exception as err:
            self._logger.error(f"Failed to send a request to Slack API server: {err}")
            raise err
    def _build_urllib_request_headers(
        self, token: str, has_json: bool, has_files: bool, additional_headers: dict
    ) -> Dict[str, str]:
        """Builds request headers. Content-Type resolution order: form-encoded
        default -> instance headers -> additional headers -> JSON override ->
        removed entirely for multipart uploads (set later from the body)."""
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        headers.update(self.headers)
        if token:
            headers.update({"Authorization": "Bearer {}".format(token)})
        if additional_headers:
            headers.update(additional_headers)
        if has_json:
            headers.update({"Content-Type": "application/json;charset=utf-8"})
        if has_files:
            # will be set afterwards
            headers.pop("Content-Type", None)
        return headers
    # =================================================================
    @staticmethod
    def validate_slack_signature(
        *, signing_secret: str, data: str, timestamp: str, signature: str
    ) -> bool:
        """
        Slack creates a unique string for your app and shares it with you. Verify
        requests from Slack with confidence by verifying signatures using your
        signing secret.
        On each HTTP request that Slack sends, we add an X-Slack-Signature HTTP
        header. The signature is created by combining the signing secret with the
        body of the request we're sending using a standard HMAC-SHA256 keyed hash.
        https://api.slack.com/docs/verifying-requests-from-slack#how_to_make_a_request_signature_in_4_easy_steps__an_overview
        Args:
            signing_secret: Your application's signing secret, available in the
                Slack API dashboard
            data: The raw body of the incoming request - no headers, just the body.
            timestamp: from the 'X-Slack-Request-Timestamp' header
            signature: from the 'X-Slack-Signature' header - the calculated signature
                should match this.
        Returns:
            True if signatures matches
        """
        warnings.warn(
            "As this method is deprecated since slackclient 2.6.0, "
            "use `from slack.signature import SignatureVerifier` instead",
            DeprecationWarning,
        )
        format_req = str.encode(f"v0:{timestamp}:{data}")
        encoded_secret = str.encode(signing_secret)
        request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()
        calculated_signature = f"v0={request_hash}"
        # compare_digest performs a constant-time comparison (timing-attack safe).
        return hmac.compare_digest(calculated_signature, signature)
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/web/base_client.py",
"copies": "1",
"size": "19918",
"license": "mit",
"hash": -5378926166902108000,
"line_mean": 39.4837398374,
"line_max": 125,
"alpha_frac": 0.5260066272,
"autogenerated": false,
"ratio": 4.353661202185792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008098592330113612,
"num_lines": 492
} |
'''A Python module for reading and writing C3D files.'''
from __future__ import unicode_literals
import array
import io
import numpy as np
import struct
import warnings
import codecs
PROCESSOR_INTEL = 84
PROCESSOR_DEC = 85
PROCESSOR_MIPS = 86


class DataTypes(object):
    ''' Container defining the numpy scalar types used when reading file data.

    Which byte order the multi-byte types use depends on the processor format
    the C3D file was written with: SGI/MIPS files are big-endian, while Intel
    and DEC files are little-endian.
    '''

    # Multi-byte types whose byte order depends on the processor format.
    _ENDIAN_SENSITIVE = ('float32', 'float64', 'uint16', 'uint32', 'uint64',
                         'int16', 'int32', 'int64')

    def __init__(self, proc_type):
        self.proc_type = proc_type
        # Single-byte types have no endianness.
        self.uint8 = np.uint8
        self.int8 = np.int8
        if proc_type == PROCESSOR_MIPS:
            # Big-Endian (SGI/MIPS format): byte-swapped dtype views.
            for type_name in self._ENDIAN_SENSITIVE:
                swapped = np.dtype(getattr(np, type_name)).newbyteorder('>')
                setattr(self, type_name, swapped)
        else:
            # Little-Endian format (Intel or DEC format): native numpy types.
            for type_name in self._ENDIAN_SENSITIVE:
                setattr(self, type_name, getattr(np, type_name))

    @property
    def is_ieee(self):
        ''' True if the associated file is in the Intel format.
        '''
        return self.proc_type == PROCESSOR_INTEL

    @property
    def is_dec(self):
        ''' True if the associated file is in the DEC format.
        '''
        return self.proc_type == PROCESSOR_DEC

    @property
    def is_mips(self):
        ''' True if the associated file is in the SGI/MIPS format.
        '''
        return self.proc_type == PROCESSOR_MIPS

    def decode_string(self, bytes):
        ''' Decode a byte array to a string, trying UTF-8 first, then Latin-1.
        '''
        decoders = ['utf-8', 'latin-1']
        for candidate in decoders:
            try:
                return codecs.decode(bytes, candidate)
            except UnicodeDecodeError:
                continue
        # Revert to the default decoder but replace undecodable characters.
        return codecs.decode(bytes, decoders[0], 'replace')
def UNPACK_FLOAT_IEEE(uint_32):
    '''Reinterpret a 32-bit unsigned integer's bits as an IEEE 754 float.

    The integer is packed little-endian and unpacked with the native float
    format, matching how the value was read from an Intel-format file.
    '''
    packed = struct.pack("<I", uint_32)
    (value,) = struct.unpack('f', packed)
    return value
def UNPACK_FLOAT_MIPS(uint_32):
    '''Reinterpret a 32-bit unsigned integer's bits as a float, swapping bytes.

    The integer is packed big-endian before the native-format unpack, which
    byte-swaps values read from SGI/MIPS (big-endian) files.
    '''
    packed = struct.pack(">I", uint_32)
    (value,) = struct.unpack('f', packed)
    return value
def DEC_to_IEEE(uint_32):
    '''Convert the 32-bit representation of a DEC float to IEEE format.

    Params:
    ----
    uint_32 : 32 bit unsigned integer containing the DEC single precision
        floating point bits, as read little-endian from the file.
    Returns : IEEE formatted floating point of the same shape as the input.
    '''
    # Bit-pattern references:
    # http://home.fnal.gov/~yang/Notes/ieee_vs_dec_float.txt
    # http://www.irig106.org/docs/106-07/appendixO.pdf
    # Warning! Unsure if NaN numbers are managed appropriately.
    #
    # DEC stores the two 16-bit halves of the float in swapped order relative
    # to IEEE, so swap them back first.  After the swap the layout is
    # SIGN | exponent | fraction (most significant fraction bits in the low 7
    # bits of the original first word).
    word_swapped = ((uint_32 & 0xFFFF0000) >> 16) | ((uint_32 & 0x0000FFFF) << 16)
    # The DEC exponent bias differs from IEEE by 2; decrement the exponent
    # field by 2, i.e. subtract one unit of bit 24 from the top byte
    # (sign + upper exponent bits), which is equivalent to the original
    # "(x - 1) & 0xFF000000" borrow trick.
    top_byte = ((word_swapped & 0xFF000000) - 0x01000000) & 0xFF000000
    ieee_bits = (word_swapped & 0x00FFFFFF) | top_byte
    # Reinterpret the adjusted bit pattern as an IEEE 754 float
    # (inlined UNPACK_FLOAT_IEEE).
    return struct.unpack('f', struct.pack("<I", ieee_bits))[0]
def DEC_to_IEEE_BYTES(bytes):
    '''Convert a byte array containing 32-bit DEC floats to IEEE format.

    Params:
    ----
    bytes : Byte array where every 4 bytes represent a single precision DEC float.
    Returns : numpy array of float32 with one value per 4 input bytes.
    '''
    # See comments in DEC_to_IEEE() for the DEC format definition.
    raw = np.frombuffer(bytes, dtype=np.dtype('B'))
    shuffled = np.empty(len(raw), dtype=np.dtype('B'))
    # Swap the 16-bit words of every float (DEC stores them reversed).
    shuffled[0::4] = raw[2::4]
    shuffled[1::4] = raw[3::4]
    shuffled[2::4] = raw[0::4]
    # Decrement the exponent by 2 (this byte by 1) unless the exponent field
    # is zero: the boolean term is 1 exactly when no decrement should happen.
    exp_byte = raw[1::4]
    shuffled[3::4] = exp_byte + ((exp_byte & 0x7f == 0) - 1)
    # Note: decrementing the exponent does not accurately convert when the
    # DEC exponent is < 2, so only small numbers convert incorrectly; DEC
    # exponent 0 means 0/undefined, and IEEE exponent 255 produces NaN.
    return np.frombuffer(shuffled.tobytes(),
                         dtype=np.float32,
                         count=int(len(raw) / 4))
class Header(object):
    '''Header information from a C3D file.

    Attributes
    ----------
    event_block : int
        Index of the 512-byte block where labels (metadata) are found.
    parameter_block : int
        Index of the 512-byte block where parameters (metadata) are found.
    data_block : int
        Index of the 512-byte block where data starts.
    point_count : int
        Number of motion capture channels recorded in this file.
    analog_count : int
        Number of analog values recorded per frame of 3D point data.
    first_frame : int
        Index of the first frame of data.
    last_frame : int
        Index of the last frame of data.
    analog_per_frame : int
        Number of analog frames per frame of 3D point data. The analog frame
        rate (ANALOG:RATE) apparently equals the point frame rate (POINT:RATE)
        times this value.
    frame_rate : float
        The frame rate of the recording, in frames per second.
    scale_factor : float
        Multiply values in the file by this scale parameter.
    long_event_labels : bool
    max_gap : int

    .. note::
        The ``scale_factor`` attribute is not used in Phasespace C3D files;
        instead, use the POINT.SCALE parameter.
    .. note::
        The ``first_frame`` and ``last_frame`` header attributes are not used in
        C3D files generated by Phasespace. Instead, the first and last
        frame numbers are stored in the POINTS:ACTUAL_START_FIELD and
        POINTS:ACTUAL_END_FIELD parameters.
    '''

    # Read/write header formats.  The read formats load the two float fields
    # (scale_factor, frame_rate) as unsigned ints ('I' rather than 'f') so the
    # raw bit patterns can be converted once the processor type is known --
    # see processor_convert().
    BINARY_FORMAT_WRITE = '<BBHHHHHfHHf274sHHH164s44s'
    BINARY_FORMAT_READ = '<BBHHHHHIHHI274sHHH164s44s'
    BINARY_FORMAT_READ_BIG_ENDIAN = '>BBHHHHHIHHI274sHHH164s44s'

    def __init__(self, handle=None):
        '''Create a new Header object.

        Parameters
        ----------
        handle : file handle, optional
            If given, initialize attributes for the Header from this file
            handle. The handle must be seek-able and readable. If `handle` is
            not given, Header attributes are initialized with default values.
        '''
        self.parameter_block = 2
        self.data_block = 3
        self.point_count = 50
        self.analog_count = 0
        self.first_frame = 1
        self.last_frame = 1
        self.analog_per_frame = 0
        self.frame_rate = 60.0
        self.max_gap = 0
        self.scale_factor = -1.0
        self.long_event_labels = False
        self.event_count = 0
        self.event_block = b''
        self.event_timings = np.zeros(0, dtype=np.float32)
        # FIX: use the builtin `bool` -- the `np.bool` alias was deprecated in
        # NumPy 1.20 and removed in 1.24, so `dtype=np.bool` raises on modern
        # NumPy.
        self.event_disp_flags = np.zeros(0, dtype=bool)
        self.event_labels = []
        if handle:
            self.read(handle)

    def write(self, handle):
        '''Write binary header data to a file handle.

        This method writes exactly 512 bytes to the beginning of the given file
        handle.

        Parameters
        ----------
        handle : file handle
            The given handle will be reset to 0 using `seek` and then 512 bytes
            will be written to describe the parameters in this Header. The
            handle must be writeable.
        '''
        handle.seek(0)
        handle.write(struct.pack(self.BINARY_FORMAT_WRITE,
                                 # Pack vars:
                                 self.parameter_block,
                                 0x50,  # C3D magic byte (decimal 80)
                                 self.point_count,
                                 self.analog_count,
                                 self.first_frame,
                                 self.last_frame,
                                 self.max_gap,
                                 self.scale_factor,
                                 self.data_block,
                                 self.analog_per_frame,
                                 self.frame_rate,
                                 b'',
                                 self.long_event_labels and 0x3039 or 0x0,  # If True write long_event_key else 0
                                 self.event_count,
                                 0x0,
                                 self.event_block,
                                 b''))

    def __str__(self):
        '''Return a string representation of this Header's attributes.'''
        return '''\
parameter_block: {0.parameter_block}
point_count: {0.point_count}
analog_count: {0.analog_count}
first_frame: {0.first_frame}
last_frame: {0.last_frame}
max_gap: {0.max_gap}
scale_factor: {0.scale_factor}
data_block: {0.data_block}
analog_per_frame: {0.analog_per_frame}
frame_rate: {0.frame_rate}
long_event_labels: {0.long_event_labels}
event_block: {0.event_block}'''.format(self)

    def read(self, handle, fmt=BINARY_FORMAT_READ):
        '''Read and parse binary header data from a file handle.

        This method reads exactly 512 bytes from the beginning of the given
        file handle.

        Parameters
        ----------
        handle : file handle
            The given handle will be reset to 0 using `seek` and then 512 bytes
            will be read to initialize the attributes in this Header. The
            handle must be readable.
        fmt : Formatting string used to read the header.

        Raises
        ------
        AssertionError
            If the magic byte from the header is not 80 (the C3D magic value).
        '''
        handle.seek(0)
        raw = handle.read(512)
        (self.parameter_block,
         magic,
         self.point_count,
         self.analog_count,
         self.first_frame,
         self.last_frame,
         self.max_gap,
         self.scale_factor,   # raw uint bits until processor_convert()
         self.data_block,
         self.analog_per_frame,
         self.frame_rate,     # raw uint bits until processor_convert()
         _,
         self.long_event_labels,
         self.event_count,
         __,
         self.event_block,
         _) = struct.unpack(fmt, raw)
        # Check magic number if reading in little endian
        assert magic == 80, 'C3D magic {} != 80 !'.format(magic)
        # Check long event key (0x3039 == 12345 marks the extended format)
        self.long_event_labels = self.long_event_labels == 0x3039

    def processor_convert(self, dtypes, handle):
        ''' Function interpreting the header once processor type has been determined.

        Converts the raw bit patterns of scale_factor/frame_rate to floats and
        decodes the event section using the processor-appropriate unpacker.
        '''
        if dtypes.is_dec:
            self.scale_factor = DEC_to_IEEE(self.scale_factor)
            self.frame_rate = DEC_to_IEEE(self.frame_rate)
            float_unpack = DEC_to_IEEE
        elif dtypes.is_ieee:
            self.scale_factor = UNPACK_FLOAT_IEEE(self.scale_factor)
            self.frame_rate = UNPACK_FLOAT_IEEE(self.frame_rate)
            float_unpack = UNPACK_FLOAT_IEEE
        elif dtypes.is_mips:
            # Re-read header in big-endian
            self.read(handle, Header.BINARY_FORMAT_READ_BIG_ENDIAN)
            # Then unpack (fields already byte-swapped by the big-endian read)
            self.scale_factor = UNPACK_FLOAT_IEEE(self.scale_factor)
            self.frame_rate = UNPACK_FLOAT_IEEE(self.frame_rate)
            float_unpack = UNPACK_FLOAT_IEEE
        self.interpret_events(dtypes, float_unpack)

    def interpret_events(self, dtypes, float_unpack):
        ''' Function interpreting the event section of the header.

        Parameters
        ----------
        dtypes : DataTypes
            Determines the byte order and string decoding for this file.
        float_unpack : callable
            Converts a raw 32-bit unsigned int to a float (IEEE or DEC).
        '''
        # Event section byte blocks: 18 x 4-byte timings, 18 display flags,
        # then labels.  NOTE(review): labels start at offset 92, skipping
        # bytes 90:92 -- presumably reserved padding; confirm against the C3D
        # specification.
        time_bytes = self.event_block[:72]
        disp_bytes = self.event_block[72:90]
        label_bytes = self.event_block[92:]
        if dtypes.is_mips:
            unpack_fmt = '>I'
        else:
            unpack_fmt = '<I'
        read_count = self.event_count
        self.event_timings = np.zeros(read_count, dtype=np.float32)
        # FIX: `np.bool` alias removed in NumPy 1.24; use the builtin `bool`.
        self.event_disp_flags = np.zeros(read_count, dtype=bool)
        self.event_labels = np.empty(read_count, dtype=object)
        for i in range(read_count):
            ilong = i * 4
            # Unpack
            self.event_disp_flags[i] = disp_bytes[i] > 0
            self.event_timings[i] = float_unpack(struct.unpack(unpack_fmt, time_bytes[ilong:ilong + 4])[0])
            self.event_labels[i] = dtypes.decode_string(label_bytes[ilong:ilong + 4])

    @property
    def events(self):
        ''' Get an iterable over displayed events defined in the header. Iterable items are on form (timing, label).

        Note*:
        Time as defined by the 'timing' is relative to frame 1 and not the 'first_frame' parameter.
        Frame 1 therefore has the time 0.0 in relation to the event timing.
        '''
        return zip(self.event_timings[self.event_disp_flags], self.event_labels[self.event_disp_flags])
class Param(object):
    '''A class representing a single named parameter from a C3D file.

    Attributes
    ----------
    name : str
        Name of this parameter.
    dtype : DataTypes
        Reference to the DataTypes object associated with the file.
    desc : str
        Brief description of this parameter.
    bytes_per_element : int, optional
        For array data, this describes the size of each element of data. For
        string data (including arrays of strings), this should be -1.
    dimensions : list of int
        For array data, this describes the dimensions of the array, stored in
        column-major order. For arrays of strings, the dimensions here will be
        the number of columns (length of each string) followed by the number of
        rows (number of strings).
    bytes : str
        Raw data for this parameter.
    handle :
        File handle positioned at the first byte of a .c3d parameter description.
    '''

    def __init__(self,
                 name,
                 dtype,
                 desc='',
                 bytes_per_element=1,
                 dimensions=None,
                 bytes=b'',
                 handle=None):
        '''Set up a new parameter, only the name is required.'''
        self.name = name
        self.dtype = dtype
        self.desc = desc
        self.bytes_per_element = bytes_per_element
        self.dimensions = dimensions or []
        self.bytes = bytes
        # When a handle is given, the remaining fields are parsed from it.
        if handle:
            self.read(handle)

    def __repr__(self):
        return '<Param: {}>'.format(self.desc)

    @property
    def num_elements(self):
        '''Return the number of elements in this parameter's array value.'''
        # Product of all dimensions; 1 for a scalar (empty dimension list).
        e = 1
        for d in self.dimensions:
            e *= d
        return e

    @property
    def total_bytes(self):
        '''Return the number of bytes used for storing this parameter's data.'''
        # abs() because bytes_per_element is -1 for string data.
        return self.num_elements * abs(self.bytes_per_element)

    def binary_size(self):
        '''Return the number of bytes needed to store this parameter.'''
        return (
            1 +  # group_id
            2 +  # next offset marker
            1 + len(self.name.encode('utf-8')) +  # size of name and name bytes
            1 +  # data size
            # size of dimensions and dimension bytes
            1 + len(self.dimensions) +
            self.total_bytes +  # data
            1 + len(self.desc.encode('utf-8'))  # size of desc and desc bytes
        )

    def write(self, group_id, handle):
        '''Write binary data for this parameter to a file handle.

        Parameters
        ----------
        group_id : int
            The numerical ID of the group that holds this parameter.
        handle : file handle
            An open, writable, binary file handle.
        '''
        name = self.name.encode('utf-8')
        handle.write(struct.pack('bb', len(name), group_id))
        handle.write(name)
        # Offset (in bytes) from the end of this field to the next
        # parameter/group record.
        handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
        handle.write(struct.pack('b', self.bytes_per_element))
        handle.write(struct.pack('B', len(self.dimensions)))
        handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
        if self.bytes:
            handle.write(self.bytes)
        desc = self.desc.encode('utf-8')
        handle.write(struct.pack('B', len(desc)))
        handle.write(desc)

    def read(self, handle):
        '''Read binary data for this parameter from a file handle.

        This reads exactly enough data from the current position in the file to
        initialize the parameter.
        '''
        # Signed byte: negative values mark character (string) data.
        self.bytes_per_element, = struct.unpack('b', handle.read(1))
        dims, = struct.unpack('B', handle.read(1))
        # One unsigned byte per dimension.
        self.dimensions = [struct.unpack('B', handle.read(1))[
            0] for _ in range(dims)]
        self.bytes = b''
        if self.total_bytes:
            self.bytes = handle.read(self.total_bytes)
        # Trailing description: a length byte followed by the text.
        desc_size, = struct.unpack('B', handle.read(1))
        self.desc = desc_size and self.dtype.decode_string(handle.read(desc_size)) or ''

    def _as(self, dtype):
        '''Unpack the raw bytes of this param using the given struct format.'''
        return np.frombuffer(self.bytes, count=1, dtype=dtype)[0]

    def _as_array(self, dtype):
        '''Unpack the raw bytes of this param using the given data format.'''
        assert self.dimensions, \
            '{}: cannot get value as {} array!'.format(self.name, dtype)
        elems = np.frombuffer(self.bytes, dtype=dtype)
        # Reverse shape as the shape is defined in fortran format
        return elems.reshape(self.dimensions[::-1])

    def _as_any(self, dtype):
        '''Unpack the raw bytes of this param as either array or single value.'''
        if 0 in self.dimensions[:]:  # Check if any dimension is 0 (empty buffer)
            return []  # Buffer is empty
        if len(self.dimensions) == 0:  # Parse data as a single value
            if dtype == np.float32:  # Floats need to be parsed separately!
                return self.float_value
            return self._as(dtype)
        else:  # Parse data as array
            if dtype == np.float32:
                data = self.float_array
            else:
                data = self._as_array(dtype)
            if len(self.dimensions) < 2:  # Check if data is contained in a single dimension
                return data.flatten()
            return data

    @property
    def _as_integer_value(self):
        ''' Get the param as either 32 bit float or unsigned integer.

        Evaluates if an integer is stored as a floating point representation.

        Note: This is implemented purely for parsing start/end frames.
        '''
        if self.total_bytes >= 4:
            # Check if float value representation is an integer
            value = self.float_value
            if int(value) == value:
                return value
            return self.uint32_value
        elif self.total_bytes >= 2:
            return self.uint16_value
        else:
            return self.uint8_value

    @property
    def int8_value(self):
        '''Get the param as an 8-bit signed integer.'''
        return self._as(self.dtype.int8)

    @property
    def uint8_value(self):
        '''Get the param as an 8-bit unsigned integer.'''
        return self._as(self.dtype.uint8)

    @property
    def int16_value(self):
        '''Get the param as a 16-bit signed integer.'''
        return self._as(self.dtype.int16)

    @property
    def uint16_value(self):
        '''Get the param as a 16-bit unsigned integer.'''
        return self._as(self.dtype.uint16)

    @property
    def int32_value(self):
        '''Get the param as a 32-bit signed integer.'''
        return self._as(self.dtype.int32)

    @property
    def uint32_value(self):
        '''Get the param as a 32-bit unsigned integer.'''
        return self._as(self.dtype.uint32)

    @property
    def float_value(self):
        '''Get the param as a 32-bit float.'''
        if self.dtype.is_dec:
            # DEC floats must be bit-converted, not reinterpreted directly.
            return DEC_to_IEEE(self._as(np.uint32))
        else:  # is_mips or is_ieee
            return self._as(self.dtype.float32)

    @property
    def bytes_value(self):
        '''Get the param as a raw byte string.'''
        return self.bytes

    @property
    def string_value(self):
        '''Get the param as a unicode string.'''
        return self.dtype.decode_string(self.bytes)

    @property
    def int8_array(self):
        '''Get the param as an array of 8-bit signed integers.'''
        return self._as_array(self.dtype.int8)

    @property
    def uint8_array(self):
        '''Get the param as an array of 8-bit unsigned integers.'''
        return self._as_array(self.dtype.uint8)

    @property
    def int16_array(self):
        '''Get the param as an array of 16-bit signed integers.'''
        return self._as_array(self.dtype.int16)

    @property
    def uint16_array(self):
        '''Get the param as an array of 16-bit unsigned integers.'''
        return self._as_array(self.dtype.uint16)

    @property
    def int32_array(self):
        '''Get the param as an array of 32-bit signed integers.'''
        return self._as_array(self.dtype.int32)

    @property
    def uint32_array(self):
        '''Get the param as an array of 32-bit unsigned integers.'''
        return self._as_array(self.dtype.uint32)

    @property
    def float_array(self):
        '''Get the param as an array of 32-bit floats.'''
        # Convert float data if not IEEE processor
        if self.dtype.is_dec:
            # _as_array but for DEC
            assert self.dimensions, \
                '{}: cannot get value as {} array!'.format(self.name, self.dtype.float32)
            return DEC_to_IEEE_BYTES(self.bytes).reshape(self.dimensions[::-1])  # Reverse fortran format
        else:  # is_ieee or is_mips
            return self._as_array(self.dtype.float32)

    @property
    def bytes_array(self):
        '''Get the param as an array of raw byte strings.'''
        # Decode different dimensions
        if len(self.dimensions) == 0:
            return np.array([])
        elif len(self.dimensions) == 1:
            return np.array(self.bytes)
        else:
            # Convert Fortran shape (data in memory is identical, shape is transposed)
            word_len = self.dimensions[0]
            dims = self.dimensions[1:][::-1]  # Identical to: [:0:-1]
            byte_steps = np.cumprod(self.dimensions[:-1])[::-1]
            # Generate mult-dimensional array and parse byte words
            byte_arr = np.empty(dims, dtype=object)
            for i in np.ndindex(*dims):
                # Calculate byte offset as sum of each array index times the byte step of each dimension.
                off = np.sum(np.multiply(i, byte_steps))
                byte_arr[i] = self.bytes[off:off+word_len]
            return byte_arr

    @property
    def string_array(self):
        '''Get the param as a python array of unicode strings.'''
        # Decode different dimensions
        if len(self.dimensions) == 0:
            return np.array([])
        elif len(self.dimensions) == 1:
            return np.array([self.string_value])
        else:
            # Parse byte sequences
            byte_arr = self.bytes_array
            # Decode sequences
            for i in np.ndindex(byte_arr.shape):
                byte_arr[i] = self.dtype.decode_string(byte_arr[i])
            return byte_arr
class Group(object):
    '''A group of parameters from a C3D file.

    In C3D files, parameters are organized in groups. Each group has a name, a
    description, and a set of named parameters.

    Attributes
    ----------
    name : str
        Name of this parameter group.
    desc : str
        Description for this parameter group.
    '''

    def __init__(self, name=None, desc=None):
        self.name = name
        self.desc = desc
        # Maps upper-cased parameter names to Param objects.
        self.params = {}

    def __repr__(self):
        return '<Group: {}>'.format(self.desc)

    def get(self, key, default=None):
        '''Get a parameter by key.

        Parameters
        ----------
        key : any
            Parameter key to look up in this group.
        default : any, optional
            Value to return if the key is not found. Defaults to None.

        Returns
        -------
        param : :class:`Param`
            A parameter from the current group.
        '''
        return self.params.get(key, default)

    def add_param(self, name, dtypes, **kwargs):
        '''Add a parameter to this group.

        Parameters
        ----------
        name : str
            Name of the parameter to add to this group. The name will
            automatically be case-normalized.
        dtypes : DataTypes
            Object struct containing the data types used for reading parameter
            data.

        Additional keyword arguments will be passed to the `Param` constructor.
        '''
        key = name.upper()
        self.params[key] = Param(key, dtypes, **kwargs)

    def binary_size(self):
        '''Return the number of bytes to store this group and its parameters.'''
        name_len = len(self.name.encode('utf-8'))
        desc_len = len(self.desc.encode('utf-8'))
        param_bytes = sum(p.binary_size() for p in self.params.values())
        # group_id byte + name-size byte + name + offset marker +
        # desc-size byte + desc + all parameter records
        return 1 + 1 + name_len + 2 + 1 + desc_len + param_bytes

    def write(self, group_id, handle):
        '''Write this parameter group, with parameters, to a file handle.

        Parameters
        ----------
        group_id : int
            The numerical ID of the group.
        handle : file handle
            An open, writable, binary file handle.
        '''
        encoded_name = self.name.encode('utf-8')
        encoded_desc = self.desc.encode('utf-8')
        # A negative group_id marks this record as a group (not a parameter).
        handle.write(struct.pack('bb', len(encoded_name), -group_id))
        handle.write(encoded_name)
        handle.write(struct.pack('<h', 3 + len(encoded_desc)))
        handle.write(struct.pack('B', len(encoded_desc)))
        handle.write(encoded_desc)
        for param in self.params.values():
            param.write(group_id, handle)

    def get_int8(self, key):
        '''Get the value of the given parameter as an 8-bit signed integer.'''
        return self.params[key.upper()].int8_value

    def get_uint8(self, key):
        '''Get the value of the given parameter as an 8-bit unsigned integer.'''
        return self.params[key.upper()].uint8_value

    def get_int16(self, key):
        '''Get the value of the given parameter as a 16-bit signed integer.'''
        return self.params[key.upper()].int16_value

    def get_uint16(self, key):
        '''Get the value of the given parameter as a 16-bit unsigned integer.'''
        return self.params[key.upper()].uint16_value

    def get_int32(self, key):
        '''Get the value of the given parameter as a 32-bit signed integer.'''
        return self.params[key.upper()].int32_value

    def get_uint32(self, key):
        '''Get the value of the given parameter as a 32-bit unsigned integer.'''
        return self.params[key.upper()].uint32_value

    def get_float(self, key):
        '''Get the value of the given parameter as a 32-bit float.'''
        return self.params[key.upper()].float_value

    def get_bytes(self, key):
        '''Get the value of the given parameter as a byte array.'''
        return self.params[key.upper()].bytes_value

    def get_string(self, key):
        '''Get the value of the given parameter as a string.'''
        return self.params[key.upper()].string_value
class Manager(object):
'''A base class for managing C3D file metadata.
This class manages a C3D header (which contains some stock metadata fields)
as well as a set of parameter groups. Each group is accessible using its
name.
Attributes
----------
header : `Header`
Header information for the C3D file.
'''
def __init__(self, header=None):
'''Set up a new Manager with a Header.'''
self.header = header or Header()
self.groups = {}
def check_metadata(self):
'''Ensure that the metadata in our file is self-consistent.'''
assert self.header.point_count == self.point_used, (
'inconsistent point count! {} header != {} POINT:USED'.format(
self.header.point_count,
self.point_used,
))
assert self.header.scale_factor == self.point_scale, (
'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
self.header.scale_factor,
self.point_scale,
))
assert self.header.frame_rate == self.point_rate, (
'inconsistent frame rate! {} header != {} POINT:RATE'.format(
self.header.frame_rate,
self.point_rate,
))
if self.point_rate:
ratio = self.analog_rate / self.point_rate
else:
ratio = 0
assert self.header.analog_per_frame == ratio, (
'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
self.header.analog_per_frame,
self.analog_rate,
self.point_rate,
))
count = self.analog_used * self.header.analog_per_frame
assert self.header.analog_count == count, (
'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
self.header.analog_count,
self.analog_used,
self.header.analog_per_frame,
))
try:
start = self.get_uint16('POINT:DATA_START')
if self.header.data_block != start:
warnings.warn('inconsistent data block! {} header != {} POINT:DATA_START'.format(
self.header.data_block, start))
except AttributeError:
warnings.warn('''no pointer available in POINT:DATA_START indicating the start of the data block, using
header pointer as fallback''')
def check_parameters(params):
for name in params:
if self.get(name) is None:
warnings.warn('missing parameter {}'.format(name))
if self.point_used > 0:
check_parameters(('POINT:LABELS', 'POINT:DESCRIPTIONS'))
else:
warnings.warn('No point data found in file.')
if self.analog_used > 0:
check_parameters(('ANALOG:LABELS', 'ANALOG:DESCRIPTIONS'))
else:
warnings.warn('No analog data found in file.')
def add_group(self, group_id, name, desc):
'''Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists.
'''
if group_id in self.groups:
raise KeyError(group_id)
name = name.upper()
if name in self.groups:
raise KeyError(name)
group = self.groups[name] = self.groups[group_id] = Group(name, desc)
return group
def get(self, group, default=None):
'''Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value.
'''
if isinstance(group, int):
return self.groups.get(group, default)
group = group.upper()
param = None
if '.' in group:
group, param = group.split('.', 1)
if ':' in group:
group, param = group.split(':', 1)
if group not in self.groups:
return default
group = self.groups[group]
if param is not None:
return group.get(param, default)
return group
def get_int8(self, key):
'''Get a parameter value as an 8-bit signed integer.'''
return self.get(key).int8_value
def get_uint8(self, key):
'''Get a parameter value as an 8-bit unsigned integer.'''
return self.get(key).uint8_value
def get_int16(self, key):
'''Get a parameter value as a 16-bit signed integer.'''
return self.get(key).int16_value
def get_uint16(self, key):
'''Get a parameter value as a 16-bit unsigned integer.'''
return self.get(key).uint16_value
def get_int32(self, key):
'''Get a parameter value as a 32-bit signed integer.'''
return self.get(key).int32_value
def get_uint32(self, key):
'''Get a parameter value as a 32-bit unsigned integer.'''
return self.get(key).uint32_value
def get_float(self, key):
'''Get a parameter value as a 32-bit float.'''
return self.get(key).float_value
def get_bytes(self, key):
'''Get a parameter value as a byte string.'''
return self.get(key).bytes_value
def get_string(self, key):
'''Get a parameter value as a string.'''
return self.get(key).string_value
def parameter_blocks(self):
'''Compute the size (in 512B blocks) of the parameter section.'''
bytes = 4. + sum(g.binary_size() for g in self.groups.values())
return int(np.ceil(bytes / 512))
@property
def point_rate(self):
''' Number of sampled 3D coordinates per second.
'''
try:
return self.get_float('POINT:RATE')
except AttributeError:
return self.header.frame_rate
@property
def point_scale(self):
try:
return self.get_float('POINT:SCALE')
except AttributeError:
return self.header.scale_factor
@property
def point_used(self):
''' Number of sampled 3D point coordinates per frame.
'''
try:
return self.get_uint16('POINT:USED')
except AttributeError:
return self.header.point_count
@property
def analog_used(self):
''' Number of analog measurements, or channels, for each analog data sample.
'''
try:
return self.get_uint16('ANALOG:USED')
except AttributeError:
return self.header.analog_count
@property
def analog_rate(self):
''' Number of analog data samples per second.
'''
try:
return self.get_float('ANALOG:RATE')
except AttributeError:
return self.header.analog_per_frame * self.point_rate
@property
def analog_per_frame(self):
''' Number of analog samples per 3D frame (point sample).
'''
return int(self.analog_rate / self.point_rate)
@property
def analog_sample_count(self):
''' Number of analog samples per channel.
'''
has_analog = self.analog_used > 0
return int(self.frame_count * self.analog_per_frame) * has_analog
@property
def point_labels(self):
    '''Labels of the 3D point channels (POINT:LABELS), as a list of strings.'''
    labels_param = self.get('POINT:LABELS')
    return labels_param.string_array
@property
def analog_labels(self):
    '''Labels of the analog channels (ANALOG:LABELS), as a list of strings.'''
    labels_param = self.get('ANALOG:LABELS')
    return labels_param.string_array
@property
def frame_count(self):
    '''Total number of 3D frames in the file.'''
    # The [first, last] frame range is inclusive, hence the extra 1.
    return 1 + self.last_frame - self.first_frame
@property
def first_frame(self):
    '''Index of the first frame of 3D point data.

    Phasespace-style files store the true start frame in the
    TRIAL:ACTUAL_START_FIELD parameter rather than in the header.
    '''
    start_param = self.get('TRIAL:ACTUAL_START_FIELD')
    if start_param is None:
        return self.header.first_frame
    return start_param.uint32_value
@property
def last_frame(self):
    '''Index of the last frame of 3D point data.

    Trusts the header when its frame range is sane; otherwise takes the
    largest value found among the parameters that may encode the end frame.
    '''
    header = self.header
    # A header range is considered valid when first < last and last is not
    # the 65535 "overflow" sentinel of the 16-bit header field.
    if header.first_frame < header.last_frame and header.last_frame != 65535:
        return header.last_frame
    # The end frame can be encoded in several different parameters; keep one
    # slot per candidate (zeros never win because the header value is >= 0).
    candidates = [header.last_frame, 0.0, 0.0, 0.0]
    for slot, key in enumerate(
            ('TRIAL:ACTUAL_END_FIELD', 'POINT:LONG_FRAMES', 'POINT:FRAMES'), 1):
        param = self.get(key)
        if param is not None:
            # POINT:FRAMES may be stored as a 32-bit float or a 16-bit uint;
            # _as_integer_value normalizes either encoding.
            candidates[slot] = param._as_integer_value
    # Return the largest of them all (cue bad reading...)
    return int(max(candidates))
class Reader(Manager):
    '''This class provides methods for reading the data in a C3D file.

    A C3D file contains metadata and frame-based data describing 3D motion.
    You can iterate over the frames in the file by calling `read_frames()`
    after construction:

    >>> r = c3d.Reader(open('capture.c3d', 'rb'))
    >>> for frame_no, points, analog in r.read_frames():
    ...     print('{0.shape} points in this frame'.format(points))
    '''

    def __init__(self, handle):
        '''Initialize this C3D file by reading header and parameter data.

        Parameters
        ----------
        handle : file handle
            Read metadata and C3D motion frames from the given file handle.
            This handle is assumed to be `seek`-able and `read`-able. The
            handle must remain open for the life of the `Reader` instance.
            The `Reader` does not `close` the handle.
        '''
        super(Reader, self).__init__(Header(handle))
        self._handle = handle

        def seek_param_section_header():
            ''' Seek to and read the first 4 bytes of the parameter header section. '''
            self._handle.seek((self.header.parameter_block - 1) * 512)
            # metadata header
            return self._handle.read(4)

        # Begin by reading the processor type:
        buf = seek_param_section_header()
        _, _, parameter_blocks, self.processor = struct.unpack('BBBB', buf)
        self.dtypes = DataTypes(self.processor)
        # Convert header parameters in accordance with the processor type
        # (MIPS format re-reads the header).
        self.header.processor_convert(self.dtypes, handle)

        # Restart reading the parameter header after parsing processor type.
        buf = seek_param_section_header()

        is_mips = self.processor == PROCESSOR_MIPS
        start_byte = self._handle.tell()
        endbyte = start_byte + 512 * parameter_blocks - 4
        while self._handle.tell() < endbyte:
            chars_in_name, group_id = struct.unpack('bb', self._handle.read(2))
            if group_id == 0 or chars_in_name == 0:
                # we've reached the end of the parameter section.
                break
            name = self.dtypes.decode_string(self._handle.read(abs(chars_in_name))).upper()

            # Read the byte segment associated with the parameter and create a
            # separate binary stream object from the data.
            # The 2-byte offset to the next parameter is byte-swapped on MIPS.
            offset_to_next, = struct.unpack(['<h', '>h'][is_mips], self._handle.read(2))
            if offset_to_next == 0:
                # Last parameter, as number of bytes are unknown,
                # read the remaining bytes in the parameter section.
                bytes = self._handle.read(endbyte - self._handle.tell())
            else:
                bytes = self._handle.read(offset_to_next - 2)
            buf = io.BytesIO(bytes)

            if group_id > 0:
                # we've just started reading a parameter. if its group doesn't
                # exist, create a blank one. add the parameter to the group.
                self.groups.setdefault(
                    group_id, Group()).add_param(name, self.dtypes, handle=buf)
            else:
                # we've just started reading a group. if a group with the
                # appropriate id exists already (because we've already created
                # it for a parameter), just set the name of the group.
                # otherwise, add a new group.
                group_id = abs(group_id)
                size, = struct.unpack('B', buf.read(1))
                desc = size and buf.read(size) or ''
                group = self.get(group_id)
                if group is not None:
                    group.name = name
                    group.desc = desc
                    self.groups[name] = group
                else:
                    self.add_group(group_id, name, desc)
        self.check_metadata()

    def read_frames(self, copy=True):
        '''Iterate over the data frames from our C3D file handle.

        Parameters
        ----------
        copy : bool
            If False, the reader returns a reference to the same data buffers
            for every frame. The default is True, which causes the reader to
            return a unique data buffer for each frame. Set this to False if
            you consume frames as you iterate over them, or True if you store
            them for later.

        Returns
        -------
        frames : sequence of (frame number, points, analog)
            This method generates a sequence of (frame number, points, analog)
            tuples, one tuple per frame. The first element of each tuple is
            the frame number. The second is a numpy array of parsed, 5D point
            data and the third element of each tuple is a numpy array of
            analog values that were recorded during the frame. (Often the
            analog data are sampled at a higher frequency than the 3D point
            data, resulting in multiple analog frames per frame of point
            data.)

            The first three columns in the returned point data are the
            (x, y, z) coordinates of the observed motion capture point. The
            fourth column is an estimate of the error for this particular
            point, and the fifth column is the number of cameras that
            observed the point in question. Both the fourth and fifth values
            are -1 if the point is considered to be invalid.
        '''
        # Point magnitude scalar, if scale parameter is < 0 data is floating point
        # (in which case the magnitude is the absolute value)
        scale_mag = abs(self.point_scale)
        is_float = self.point_scale < 0

        if is_float:
            point_word_bytes = 4
            point_dtype = self.dtypes.uint32
        else:
            point_word_bytes = 2
            point_dtype = self.dtypes.int16
        points = np.zeros((self.point_used, 5), np.float32)

        # TODO: handle ANALOG:BITS parameter here!
        p = self.get('ANALOG:FORMAT')
        analog_unsigned = p and p.string_value.strip().upper() == 'UNSIGNED'
        if is_float:
            analog_dtype = self.dtypes.float32
            analog_word_bytes = 4
        elif analog_unsigned:
            # Note*: Floating point is 'always' defined for both analog and point data, according to the standard.
            analog_dtype = self.dtypes.uint16
            analog_word_bytes = 2
            # Verify BITS parameter for analog
            p = self.get('ANALOG:BITS')
            if p and p._as_integer_value / 8 != analog_word_bytes:
                raise NotImplementedError('Analog data using {} bits is not supported.'.format(p._as_integer_value))
        else:
            analog_dtype = self.dtypes.int16
            analog_word_bytes = 2

        analog = np.array([], float)
        offsets = np.zeros((self.analog_used, 1), int)
        param = self.get('ANALOG:OFFSET')
        if param is not None:
            offsets = param.int16_array[:self.analog_used, None]

        analog_scales = np.ones((self.analog_used, 1), float)
        param = self.get('ANALOG:SCALE')
        if param is not None:
            analog_scales[:, :] = param.float_array[:self.analog_used, None]

        gen_scale = 1.
        param = self.get('ANALOG:GEN_SCALE')
        if param is not None:
            gen_scale = param.float_value

        # Seek to the start point of the data blocks
        self._handle.seek((self.header.data_block - 1) * 512)
        # Number of values (words) read in regard to POINT/ANALOG data
        N_point = 4 * self.point_used
        N_analog = self.analog_used * self.analog_per_frame
        # Total bytes per frame
        point_bytes = N_point * point_word_bytes
        analog_bytes = N_analog * analog_word_bytes
        # Parse the data blocks
        for frame_no in range(self.first_frame, self.last_frame + 1):
            # Read the byte data (used) for the block
            raw_bytes = self._handle.read(N_point * point_word_bytes)
            raw_analog = self._handle.read(N_analog * analog_word_bytes)
            # Verify read pointers (any of the two can be assumed to be 0)
            if len(raw_bytes) < point_bytes:
                warnings.warn('''reached end of file (EOF) while reading POINT data at frame index {}
and file pointer {}!'''.format(frame_no - self.first_frame, self._handle.tell()))
                return
            # BUG FIX: this branch checks the ANALOG read but previously
            # reported "POINT data" in its warning (copy-paste of the branch
            # above); the message now names the right section.
            if len(raw_analog) < analog_bytes:
                warnings.warn('''reached end of file (EOF) while reading ANALOG data at frame index {}
and file pointer {}!'''.format(frame_no - self.first_frame, self._handle.tell()))
                return

            if is_float:
                # Convert every 4 byte words to a float-32 representation
                # (the fourth column is still not a float32 representation)
                if self.processor == PROCESSOR_DEC:
                    # Convert each of the first 6 16-bit words from DEC to IEEE float
                    points[:, :4] = DEC_to_IEEE_BYTES(raw_bytes).reshape((self.point_used, 4))
                else:  # If IEEE or MIPS:
                    # Re-read the raw byte representation directly
                    points[:, :4] = np.frombuffer(raw_bytes,
                                                  dtype=self.dtypes.float32,
                                                  count=N_point).reshape((int(self.point_used), 4))

                # Parse the camera-observed bits and residuals.
                # Notes:
                # - Invalid sample if residual is equal to -1.
                # - A residual of 0.0 represent modeled data (filtered or interpolated).
                # - The same format should be used internally when a float or integer representation is used,
                #   with the difference that the words are 16 and 8 bit respectively (see the MLS guide).
                # - While words are 16 bit, residual and camera mask is always interpreted as 8 packed in a single word!
                # - 16 or 32 bit may represent a sign (indication that certain files write a -1 floating point only)
                last_word = points[:, 3].astype(np.int32)
                valid = (last_word & 0x80008000) == 0
                points[~valid, 3:5] = -1.0
                c = last_word[valid]
            else:
                # Convert the bytes to a unsigned 32 bit or signed 16 bit representation
                raw = np.frombuffer(raw_bytes,
                                    dtype=point_dtype,
                                    count=N_point).reshape((self.point_used, 4))
                # Read point 2 byte words in int-16 format
                points[:, :3] = raw[:, :3] * scale_mag
                # Parse last 16-bit word as two 8-bit words
                valid = raw[:, 3] > -1
                points[~valid, 3:5] = -1
                c = raw[valid, 3].astype(self.dtypes.uint16)

            # Convert coordinate data
            # fourth value is floating-point (scaled) error estimate (residual)
            points[valid, 3] = (c & 0xff).astype(np.float32) * scale_mag
            # fifth value is number of bits set in camera-observation byte
            points[valid, 4] = sum((c & (1 << k)) >> k for k in range(8, 15))
            # Get value as is: points[valid, 4] = (c >> 8)

            # Check if analog data exist, and parse if so
            if N_analog > 0:
                if is_float and self.processor == PROCESSOR_DEC:
                    # Convert each of the 16-bit words from DEC to IEEE float
                    analog = DEC_to_IEEE_BYTES(raw_analog)
                else:
                    # Integer or INTEL/MIPS floating point data can be parsed directly
                    analog = np.frombuffer(raw_analog, dtype=analog_dtype, count=N_analog)

                # Reformat and convert: file order is sample-major rows of
                # channels, so reshape to (samples, channels) and transpose.
                analog = analog.reshape((-1, self.analog_used)).T
                analog = analog.astype(float)
                # Convert analog
                analog = (analog - offsets) * analog_scales * gen_scale

            # Output buffers
            if copy:
                yield frame_no, points.copy(), analog  # .copy(), a new array is generated per frame for analog data.
            else:
                yield frame_no, points, analog

        # Function evaluating EOF, note that data section is written in blocks of 512
        final_byte_index = self._handle.tell()
        self._handle.seek(0, 2)  # os.SEEK_END)
        # Check if more than 1 block remains
        if self._handle.tell() - final_byte_index >= 512:
            warnings.warn('incomplete reading of data blocks. {} bytes remained after all datablocks were read!'.format(
                self._handle.tell() - final_byte_index))

    @property
    def proc_type(self):
        """
        Get the processor type associated with the data format in the file.
        """
        processor_type = ['PROCESSOR_INTEL', 'PROCESSOR_DEC', 'PROCESSOR_MIPS']
        return processor_type[self.processor - PROCESSOR_INTEL]
class Writer(Manager):
    '''This class writes metadata and frames to a C3D file.

    For example, to read an existing C3D file, apply some sort of data
    processing to the frames, and write out another C3D file::

    >>> r = c3d.Reader(open('data.c3d', 'rb'))
    >>> w = c3d.Writer()
    >>> w.add_frames(process_frames_somehow(r.read_frames()))
    >>> with open('smoothed.c3d', 'wb') as handle:
    >>>     w.write(handle)

    Parameters
    ----------
    point_rate : float, optional
        The frame rate of the data. Defaults to 480.
    analog_rate : float, optional
        The analog sample rate, in samples per second. Defaults to 0.
    point_scale : float, optional
        The scale factor for point data. Defaults to -1 (i.e., "check the
        POINT:SCALE parameter").
    point_units : str, optional
        The units that the point numbers represent. Defaults to ``'mm '``.
    gen_scale : float, optional
        General scaling factor for data. Defaults to 1.
    '''

    def __init__(self,
                 point_rate=480.,
                 analog_rate=0.,
                 point_scale=-1.,
                 point_units='mm ',
                 gen_scale=1.):
        '''Set metadata for this writer.
        '''
        super(Writer, self).__init__()
        self._point_rate = point_rate
        self._analog_rate = analog_rate
        # analog samples recorded during each point frame
        self._analog_per_frame = analog_rate / point_rate
        self._point_scale = point_scale
        self._point_units = point_units
        self._gen_scale = gen_scale
        self._frames = []

    def add_frames(self, frames):
        '''Add frames to this writer instance.

        Parameters
        ----------
        frames : sequence of (point, analog) tuples
            A sequence of frame data to add to the writer.
        '''
        self._frames.extend(frames)

    def _pad_block(self, handle):
        '''Pad the file with 0s to the end of the next block boundary.'''
        extra = handle.tell() % 512
        if extra:
            handle.write(b'\x00' * (512 - extra))

    def _write_metadata(self, handle):
        '''Write metadata to a file handle.

        Parameters
        ----------
        handle : file
            Write metadata and C3D motion frames to the given file handle.
            The writer does not close the handle.
        '''
        self.check_metadata()

        # header
        self.header.write(handle)
        self._pad_block(handle)
        assert handle.tell() == 512

        # groups: 4-byte parameter section header, then each group by id.
        handle.write(struct.pack(
            'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
        id_groups = sorted(
            (i, g) for i, g in self.groups.items() if isinstance(i, int))
        for group_id, group in id_groups:
            group.write(group_id, handle)

        # padding out to the data block
        self._pad_block(handle)
        while handle.tell() != 512 * (self.header.data_block - 1):
            handle.write(b'\x00' * 512)

    def _write_frames(self, handle):
        '''Write our frame data to the given file handle.

        Parameters
        ----------
        handle : file
            Write metadata and C3D motion frames to the given file handle.
            The writer does not close the handle.
        '''
        assert handle.tell() == 512 * (self.header.data_block - 1)
        scale = abs(self.point_scale)
        is_float = self.point_scale < 0
        if is_float:
            point_dtype = np.float32
            point_format = 'f'
            point_scale = 1.0
        else:
            point_dtype = np.int16
            # BUG FIX: was 'i' (a 4-byte int in the array module); C3D
            # integer point/analog words are 16-bit, so use 'h' to match
            # np.int16 and the 2-byte word size the header implies.
            point_format = 'h'
            point_scale = scale
        raw = np.empty((self.point_used, 4), point_dtype)
        for points, analog in self._frames:
            valid = points[:, 3] > -1
            raw[~valid, 3] = -1
            raw[valid, :3] = points[valid, :3] / point_scale
            # pack camera byte (hi) and scaled residual (lo) into one word
            raw[valid, 3] = (
                ((points[valid, 4]).astype(np.uint8) << 8) |
                (points[valid, 3] / scale).astype(np.uint16)
            )
            point = array.array(point_format)
            point.extend(raw.flatten())
            point.tofile(handle)
            # BUG FIX: the original rebound `analog` to an empty array.array
            # and extended it with itself, so analog samples were never
            # written. Write the frame's analog data under a separate name.
            # NOTE(review): values are written as-is; the reader applies
            # (analog - offsets) * scales * gen_scale on the way in — confirm
            # callers pre-scale their analog data accordingly.
            analog_array = array.array(point_format)
            analog_array.extend(np.ravel(analog))
            analog_array.tofile(handle)
        self._pad_block(handle)

    def write(self, handle, labels):
        '''Write metadata and point + analog frames to a file handle.

        Parameters
        ----------
        handle : file
            Write metadata and C3D motion frames to the given file handle.
            The writer does not close the handle.
        labels : sequence of str
            Labels for the 3D point channels, one per point; stored in the
            POINT:LABELS parameter.
        '''
        if not self._frames:
            return
        dtypes = DataTypes(PROCESSOR_INTEL)

        # Helpers binding to the currently-active `group` variable below.
        def add(name, desc, bpe, format, bytes, *dimensions):
            group.add_param(name,
                            dtypes,
                            desc=desc,
                            bytes_per_element=bpe,
                            bytes=struct.pack(format, bytes),
                            dimensions=list(dimensions))

        def add_str(name, desc, bytes, *dimensions):
            group.add_param(name,
                            dtypes,
                            desc=desc,
                            bytes_per_element=-1,
                            bytes=bytes.encode('utf-8'),
                            dimensions=list(dimensions))

        def add_empty_array(name, desc, bpe):
            group.add_param(name, dtypes, desc=desc,
                            bytes_per_element=bpe, dimensions=[0])

        points, analog = self._frames[0]
        ppf = len(points)
        labels = np.ravel(labels)

        # POINT group
        # Get longest label name so all labels can be space-padded to it.
        label_max_size = 0
        label_max_size = max(label_max_size, np.max([len(label) for label in labels]))

        group = self.add_group(1, 'POINT', 'POINT group')
        add('USED', 'Number of 3d markers', 2, '<H', ppf)
        add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
        add('DATA_START', 'data block number', 2, '<H', 0)
        add('SCALE', '3d scale factor', 4, '<f', np.float32(self._point_scale))
        add('RATE', '3d data capture rate', 4, '<f', np.float32(self._point_rate))
        add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
        add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
        add_str('UNITS', '3d data units',
                self._point_units, len(self._point_units))
        add_str('LABELS', 'labels', ''.join(labels[i].ljust(label_max_size)
                                            for i in range(ppf)), label_max_size, ppf)
        add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)

        # ANALOG group
        group = self.add_group(2, 'ANALOG', 'ANALOG group')
        add('USED', 'analog channel count', 2, '<H', analog.shape[0])
        add('RATE', 'analog samples per second', 4, '<f', np.float32(self._analog_rate))
        add('GEN_SCALE', 'analog general scale factor', 4, '<f', np.float32(self._gen_scale))
        add_empty_array('SCALE', 'analog channel scale factors', 4)
        add_empty_array('OFFSET', 'analog channel offsets', 2)

        # TRIAL group
        group = self.add_group(3, 'TRIAL', 'TRIAL group')
        add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
        add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)

        # sync parameter information to header.
        blocks = self.parameter_blocks()
        self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)

        self.header.data_block = np.uint16(2 + blocks)
        self.header.frame_rate = np.float32(self._point_rate)
        self.header.last_frame = np.uint16(min(len(self._frames), 65535))
        self.header.point_count = np.uint16(ppf)
        self.header.analog_count = np.uint16(np.prod(analog.shape))
        self.header.analog_per_frame = np.uint16(self._analog_per_frame)
        self.header.scale_factor = np.float32(self._point_scale)

        self._write_metadata(handle)
        self._write_frames(handle)
| {
"repo_name": "EmbodiedCognition/py-c3d",
"path": "c3d.py",
"copies": "1",
"size": "60917",
"license": "mit",
"hash": 7908974567071348000,
"line_mean": 37.4334384858,
"line_max": 120,
"alpha_frac": 0.5713019354,
"autogenerated": false,
"ratio": 4.102707435344827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5174009370744828,
"avg_score": null,
"num_lines": null
} |
'''A Python module for reading and writing C3D files.'''
from __future__ import unicode_literals
import array
import io
import numpy as np
import struct
import warnings
PROCESSOR_INTEL = 84
PROCESSOR_DEC = 85
PROCESSOR_MIPS = 86
class Header(object):
    '''Header information from a C3D file.

    Attributes
    ----------
    label_block : int
        Index of the 512-byte block where labels (metadata) are found.
    parameter_block : int
        Index of the 512-byte block where parameters (metadata) are found.
    data_block : int
        Index of the 512-byte block where data starts.
    point_count : int
        Number of motion capture channels recorded in this file.
    analog_count : int
        Number of analog values recorded per frame of 3D point data.
    first_frame : int
        Index of the first frame of data.
    last_frame : int
        Index of the last frame of data.
    analog_per_frame : int
        Number of analog frames per frame of 3D point data. The analog frame
        rate (ANALOG:RATE) apparently equals the point frame rate (POINT:RATE)
        times this value.
    frame_rate : float
        The frame rate of the recording, in frames per second.
    scale_factor : float
        Multiply values in the file by this scale parameter.
    long_event_labels : bool
    max_gap : int

    .. note::
        The ``scale_factor`` attribute is not used in Phasespace C3D files;
        instead, use the POINT.SCALE parameter.

    .. note::
        The ``first_frame`` and ``last_frame`` header attributes are not used
        in C3D files generated by Phasespace. Instead, the first and last
        frame numbers are stored in the POINTS:ACTUAL_START_FIELD and
        POINTS:ACTUAL_END_FIELD parameters.
    '''

    # Little-endian struct layout of the 512-byte header; the two 's' fields
    # are reserved/unused padding regions.
    BINARY_FORMAT = '<BBHHHHHfHHf270sHH214s'

    def __init__(self, handle=None):
        '''Create a new Header object.

        Parameters
        ----------
        handle : file handle, optional
            If given, initialize attributes for the Header from this file
            handle. The handle must be seek-able and readable. If `handle` is
            not given, Header attributes are initialized with default values.
        '''
        self.label_block = 0
        self.parameter_block = 2
        self.data_block = 3
        self.point_count = 50
        self.analog_count = 0
        self.first_frame = 1
        self.last_frame = 1
        self.analog_per_frame = 0
        self.frame_rate = 120.0
        self.max_gap = 0
        self.scale_factor = -1.0
        self.long_event_labels = False
        if handle:
            self.read(handle)

    def write(self, handle):
        '''Write binary header data to a file handle.

        This method writes exactly 512 bytes to the beginning of the given
        file handle.

        Parameters
        ----------
        handle : file handle
            The given handle will be reset to 0 using `seek` and then 512
            bytes will be written to describe the parameters in this Header.
            The handle must be writeable.
        '''
        handle.seek(0)
        handle.write(struct.pack(self.BINARY_FORMAT,
                                 self.parameter_block,
                                 0x50,  # the C3D magic byte (80 decimal)
                                 self.point_count,
                                 self.analog_count,
                                 self.first_frame,
                                 self.last_frame,
                                 self.max_gap,
                                 self.scale_factor,
                                 self.data_block,
                                 self.analog_per_frame,
                                 self.frame_rate,
                                 b'',
                                 # 0x3039 (12345) flags long event labels
                                 self.long_event_labels and 0x3039 or 0x0,
                                 self.label_block,
                                 b''))

    def __str__(self):
        '''Return a string representation of this Header's attributes.'''
        return '''\
parameter_block: {0.parameter_block}
point_count: {0.point_count}
analog_count: {0.analog_count}
first_frame: {0.first_frame}
last_frame: {0.last_frame}
max_gap: {0.max_gap}
scale_factor: {0.scale_factor}
data_block: {0.data_block}
analog_per_frame: {0.analog_per_frame}
frame_rate: {0.frame_rate}
long_event_labels: {0.long_event_labels}
label_block: {0.label_block}'''.format(self)

    def read(self, handle):
        '''Read and parse binary header data from a file handle.

        This method reads exactly 512 bytes from the beginning of the given
        file handle.

        Parameters
        ----------
        handle : file handle
            The given handle will be reset to 0 using `seek` and then 512
            bytes will be read to initialize the attributes in this Header.
            The handle must be readable.

        Raises
        ------
        AssertionError
            If the magic byte from the header is not 80 (the C3D magic value).
        '''
        handle.seek(0)
        (self.parameter_block,
         magic,
         self.point_count,
         self.analog_count,
         self.first_frame,
         self.last_frame,
         self.max_gap,
         self.scale_factor,
         self.data_block,
         self.analog_per_frame,
         self.frame_rate,
         _,
         self.long_event_labels,
         self.label_block,
         _) = struct.unpack(self.BINARY_FORMAT, handle.read(512))
        assert magic == 80, 'C3D magic {} != 80 !'.format(magic)
class Param(object):
    '''A class representing a single named parameter from a C3D file.

    Attributes
    ----------
    name : str
        Name of this parameter.
    desc : str
        Brief description of this parameter.
    bytes_per_element : int, optional
        For array data, this describes the size of each element of data. For
        string data (including arrays of strings), this should be -1.
    dimensions : list of int
        For array data, this describes the dimensions of the array, stored in
        column-major order. For arrays of strings, the dimensions here will be
        the number of columns (length of each string) followed by the number
        of rows (number of strings).
    bytes : str
        Raw data for this parameter.
    '''

    def __init__(self,
                 name,
                 desc='',
                 bytes_per_element=1,
                 dimensions=None,
                 bytes=b'',
                 handle=None):
        '''Set up a new parameter, only the name is required.'''
        self.name = name
        self.desc = desc
        self.bytes_per_element = bytes_per_element
        self.dimensions = dimensions or []
        self.bytes = bytes
        if handle:
            self.read(handle)

    def __repr__(self):
        # NOTE(review): shows the description rather than the name — looks
        # intentional but confirm before relying on it for debugging output.
        return '<Param: {}>'.format(self.desc)

    @property
    def num_elements(self):
        '''Return the number of elements in this parameter's array value.'''
        e = 1
        for d in self.dimensions:
            e *= d
        return e

    @property
    def total_bytes(self):
        '''Return the number of bytes used for storing this parameter's data.'''
        return self.num_elements * abs(self.bytes_per_element)

    def binary_size(self):
        '''Return the number of bytes needed to store this parameter.'''
        return (
            1 +  # group_id
            2 +  # next offset marker
            1 + len(self.name.encode('utf-8')) +  # size of name and name bytes
            1 +  # data size
            1 + len(self.dimensions) +  # size of dimensions and dimension bytes
            self.total_bytes +  # data
            1 + len(self.desc.encode('utf-8'))  # size of desc and desc bytes
        )

    def write(self, group_id, handle):
        '''Write binary data for this parameter to a file handle.

        Parameters
        ----------
        group_id : int
            The numerical ID of the group that holds this parameter.
        handle : file handle
            An open, writable, binary file handle.
        '''
        name = self.name.encode('utf-8')
        handle.write(struct.pack('bb', len(name), group_id))
        handle.write(name)
        # offset to the next parameter: everything after the name field
        handle.write(struct.pack('<h', self.binary_size() - 2 - len(name)))
        handle.write(struct.pack('b', self.bytes_per_element))
        handle.write(struct.pack('B', len(self.dimensions)))
        handle.write(struct.pack('B' * len(self.dimensions), *self.dimensions))
        if self.bytes:
            handle.write(self.bytes)
        desc = self.desc.encode('utf-8')
        handle.write(struct.pack('B', len(desc)))
        handle.write(desc)

    def read(self, handle):
        '''Read binary data for this parameter from a file handle.

        This reads exactly enough data from the current position in the file
        to initialize the parameter.
        '''
        self.bytes_per_element, = struct.unpack('b', handle.read(1))
        dims, = struct.unpack('B', handle.read(1))
        self.dimensions = [struct.unpack('B', handle.read(1))[0] for _ in range(dims)]
        self.bytes = b''
        if self.total_bytes:
            self.bytes = handle.read(self.total_bytes)
        size, = struct.unpack('B', handle.read(1))
        self.desc = size and handle.read(size).decode('utf-8') or ''

    def _as(self, fmt):
        '''Unpack the raw bytes of this param using the given struct format.'''
        return struct.unpack('<' + fmt, self.bytes)[0]

    @property
    def int8_value(self):
        '''Get the param as an 8-bit signed integer.'''
        return self._as('b')

    @property
    def uint8_value(self):
        '''Get the param as an 8-bit unsigned integer.'''
        return self._as('B')

    @property
    def int16_value(self):
        '''Get the param as a 16-bit signed integer.'''
        return self._as('h')

    @property
    def uint16_value(self):
        '''Get the param as a 16-bit unsigned integer.'''
        return self._as('H')

    @property
    def int32_value(self):
        '''Get the param as a 32-bit signed integer.'''
        return self._as('i')

    @property
    def uint32_value(self):
        '''Get the param as a 32-bit unsigned integer.'''
        return self._as('I')

    @property
    def float_value(self):
        '''Get the param as a 32-bit float.'''
        return self._as('f')

    @property
    def bytes_value(self):
        '''Get the param as a raw byte string.'''
        return self.bytes

    @property
    def string_value(self):
        '''Get the param as a unicode string.'''
        return self.bytes.decode('utf-8')

    def _as_array(self, fmt):
        '''Unpack the raw bytes of this param using the given data format.'''
        assert self.dimensions, \
            '{}: cannot get value as {} array!'.format(self.name, fmt)
        elems = array.array(fmt)
        # BUG FIX: array.fromstring was deprecated since Python 3.2 and
        # removed in Python 3.9; frombytes is the byte-for-byte replacement.
        elems.frombytes(self.bytes)
        return np.array(elems).reshape(self.dimensions)

    @property
    def int8_array(self):
        '''Get the param as an array of 8-bit signed integers.'''
        return self._as_array('b')

    @property
    def uint8_array(self):
        '''Get the param as an array of 8-bit unsigned integers.'''
        return self._as_array('B')

    @property
    def int16_array(self):
        '''Get the param as an array of 16-bit signed integers.'''
        return self._as_array('h')

    @property
    def uint16_array(self):
        '''Get the param as an array of 16-bit unsigned integers.'''
        return self._as_array('H')

    @property
    def int32_array(self):
        '''Get the param as an array of 32-bit signed integers.'''
        return self._as_array('i')

    @property
    def uint32_array(self):
        '''Get the param as an array of 32-bit unsigned integers.'''
        return self._as_array('I')

    @property
    def float_array(self):
        '''Get the param as an array of 32-bit floats.'''
        return self._as_array('f')

    @property
    def bytes_array(self):
        '''Get the param as an array of raw byte strings.'''
        assert len(self.dimensions) == 2, \
            '{}: cannot get value as bytes array!'.format(self.name)
        l, n = self.dimensions
        return [self.bytes[i*l:(i+1)*l] for i in range(n)]

    @property
    def string_array(self):
        '''Get the param as a array of unicode strings.'''
        assert len(self.dimensions) == 2, \
            '{}: cannot get value as string array!'.format(self.name)
        l, n = self.dimensions
        return [self.bytes[i*l:(i+1)*l].decode('utf-8') for i in range(n)]
class Group(object):
    '''A named collection of parameters from a C3D file.

    In C3D files, parameters are organized in groups. Each group has a name,
    a description, and a set of named parameters.

    Attributes
    ----------
    name : str
        Name of this parameter group.
    desc : str
        Description for this parameter group.
    '''

    def __init__(self, name=None, desc=None):
        self.name = name
        self.desc = desc
        self.params = {}

    def __repr__(self):
        return '<Group: {}>'.format(self.desc)

    def get(self, key, default=None):
        '''Get a parameter by key.

        Parameters
        ----------
        key : any
            Parameter key to look up in this group.
        default : any, optional
            Value to return if the key is not found. Defaults to None.

        Returns
        -------
        param : :class:`Param`
            A parameter from the current group.
        '''
        return self.params.get(key, default)

    def add_param(self, name, **kwargs):
        '''Add a parameter to this group.

        Parameters
        ----------
        name : str
            Name of the parameter to add to this group. The name will
            automatically be case-normalized.

        Additional keyword arguments will be passed to the `Param`
        constructor.
        '''
        normalized = name.upper()
        self.params[normalized] = Param(normalized, **kwargs)

    def binary_size(self):
        '''Return the number of bytes to store this group and its parameters.'''
        # group_id byte, name length byte + name, next-offset word,
        # desc length byte + desc, then every parameter's own size.
        overhead = (1 +
                    1 + len(self.name.encode('utf-8')) +
                    2 +
                    1 + len(self.desc.encode('utf-8')))
        return overhead + sum(p.binary_size() for p in self.params.values())

    def write(self, group_id, handle):
        '''Write this parameter group, with parameters, to a file handle.

        Parameters
        ----------
        group_id : int
            The numerical ID of the group.
        handle : file handle
            An open, writable, binary file handle.
        '''
        encoded_name = self.name.encode('utf-8')
        encoded_desc = self.desc.encode('utf-8')
        # Groups are marked by a *negative* id in the file.
        handle.write(struct.pack('bb', len(encoded_name), -group_id))
        handle.write(encoded_name)
        handle.write(struct.pack('<h', 3 + len(encoded_desc)))
        handle.write(struct.pack('B', len(encoded_desc)))
        handle.write(encoded_desc)
        for param in self.params.values():
            param.write(group_id, handle)

    def _param(self, key):
        '''Fetch a parameter by case-normalized key (KeyError if missing).'''
        return self.params[key.upper()]

    def get_int8(self, key):
        '''Get the value of the given parameter as an 8-bit signed integer.'''
        return self._param(key).int8_value

    def get_uint8(self, key):
        '''Get the value of the given parameter as an 8-bit unsigned integer.'''
        return self._param(key).uint8_value

    def get_int16(self, key):
        '''Get the value of the given parameter as a 16-bit signed integer.'''
        return self._param(key).int16_value

    def get_uint16(self, key):
        '''Get the value of the given parameter as a 16-bit unsigned integer.'''
        return self._param(key).uint16_value

    def get_int32(self, key):
        '''Get the value of the given parameter as a 32-bit signed integer.'''
        return self._param(key).int32_value

    def get_uint32(self, key):
        '''Get the value of the given parameter as a 32-bit unsigned integer.'''
        return self._param(key).uint32_value

    def get_float(self, key):
        '''Get the value of the given parameter as a 32-bit float.'''
        return self._param(key).float_value

    def get_bytes(self, key):
        '''Get the value of the given parameter as a byte array.'''
        return self._param(key).bytes_value

    def get_string(self, key):
        '''Get the value of the given parameter as a string.'''
        return self._param(key).string_value
class Manager(object):
'''A base class for managing C3D file metadata.
This class manages a C3D header (which contains some stock metadata fields)
as well as a set of parameter groups. Each group is accessible using its
name.
Attributes
----------
header : `Header`
Header information for the C3D file.
'''
def __init__(self, header=None):
    '''Set up a new Manager, creating a default Header when none is given.'''
    self.groups = {}
    self.header = header if header else Header()
def check_metadata(self):
'''Ensure that the metadata in our file is self-consistent.'''
assert self.header.point_count == self.point_used, (
'inconsistent point count! {} header != {} POINT:USED'.format(
self.header.point_count,
self.point_used,
))
assert self.header.scale_factor == self.point_scale, (
'inconsistent scale factor! {} header != {} POINT:SCALE'.format(
self.header.scale_factor,
self.point_scale,
))
assert self.header.frame_rate == self.point_rate, (
'inconsistent frame rate! {} header != {} POINT:RATE'.format(
self.header.frame_rate,
self.point_rate,
))
ratio = self.analog_rate / self.point_rate
assert True or self.header.analog_per_frame == ratio, (
'inconsistent analog rate! {} header != {} analog-fps / {} point-fps'.format(
self.header.analog_per_frame,
self.analog_rate,
self.point_rate,
))
count = self.analog_used * self.header.analog_per_frame
assert True or self.header.analog_count == count, (
'inconsistent analog count! {} header != {} analog used * {} per-frame'.format(
self.header.analog_count,
self.analog_used,
self.header.analog_per_frame,
))
start = self.get_uint16('POINT:DATA_START')
assert self.header.data_block == start, (
'inconsistent data block! {} header != {} POINT:DATA_START'.format(
self.header.data_block, start))
for name in ('POINT:LABELS', 'POINT:DESCRIPTIONS'):
if self.get(name) is None:
warnings.warn('missing parameter {}'.format(name))
def add_group(self, group_id, name, desc):
'''Add a new parameter group.
Parameters
----------
group_id : int
The numeric ID for a group to check or create.
name : str, optional
If a group is created, assign this name to the group.
desc : str, optional
If a group is created, assign this description to the group.
Returns
-------
group : :class:`Group`
A group with the given ID, name, and description.
Raises
------
KeyError
If a group with a duplicate ID or name already exists.
'''
if group_id in self.groups:
raise KeyError(group_id)
name = name.upper()
if name in self.groups:
raise KeyError(name)
group = self.groups[name] = self.groups[group_id] = Group(name, desc)
return group
def get(self, group, default=None):
'''Get a group or parameter.
Parameters
----------
group : str
If this string contains a period (.), then the part before the
period will be used to retrieve a group, and the part after the
period will be used to retrieve a parameter from that group. If this
string does not contain a period, then just a group will be
returned.
default : any
Return this value if the named group and parameter are not found.
Returns
-------
value : :class:`Group` or :class:`Param`
Either a group or parameter with the specified name(s). If neither
is found, returns the default value.
'''
if isinstance(group, int):
return self.groups.get(group, default)
group = group.upper()
param = None
if '.' in group:
group, param = group.split('.', 1)
if ':' in group:
group, param = group.split(':', 1)
if group not in self.groups:
return default
group = self.groups[group]
if param is not None:
return group.get(param, default)
return group
def get_int8(self, key):
'''Get a parameter value as an 8-bit signed integer.'''
return self.get(key).int8_value
def get_uint8(self, key):
'''Get a parameter value as an 8-bit unsigned integer.'''
return self.get(key).uint8_value
def get_int16(self, key):
'''Get a parameter value as a 16-bit signed integer.'''
return self.get(key).int16_value
def get_uint16(self, key):
'''Get a parameter value as a 16-bit unsigned integer.'''
return self.get(key).uint16_value
def get_int32(self, key):
'''Get a parameter value as a 32-bit signed integer.'''
return self.get(key).int32_value
def get_uint32(self, key):
'''Get a parameter value as a 32-bit unsigned integer.'''
return self.get(key).uint32_value
def get_float(self, key):
'''Get a parameter value as a 32-bit float.'''
return self.get(key).float_value
def get_bytes(self, key):
'''Get a parameter value as a byte string.'''
return self.get(key).bytes_value
def get_string(self, key):
'''Get a parameter value as a string.'''
return self.get(key).string_value
def parameter_blocks(self):
'''Compute the size (in 512B blocks) of the parameter section.'''
bytes = 4. + sum(g.binary_size() for g in self.groups.values())
return int(np.ceil(bytes / 512))
@property
def point_rate(self):
return self.get_float('POINT:RATE')
@property
def point_scale(self):
return self.get_float('POINT:SCALE')
@property
def point_used(self):
return self.get_uint16('POINT:USED')
@property
def analog_used(self):
try:
return self.get_uint16('ANALOG:USED')
except AttributeError:
return 0
@property
def analog_rate(self):
try:
return self.get_float('ANALOG:RATE')
except AttributeError:
return 0
@property
def point_labels(self):
return self.get('POINT:LABELS').string_array
def first_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_START_FIELD')
if param is not None:
return param.int32_value
return self.header.first_frame
def last_frame(self):
# this is a hack for phasespace files ... should put it in a subclass.
param = self.get('TRIAL:ACTUAL_END_FIELD')
if param is not None:
return param.int32_value
return self.header.last_frame
class Reader(Manager):
    '''This class provides methods for reading the data in a C3D file.

    A C3D file contains metadata and frame-based data describing 3D motion.

    You can iterate over the frames in the file by calling `read_frames()`
    after construction:

    >>> r = c3d.Reader(open('capture.c3d', 'rb'))
    >>> for frame_no, points, analog in r.read_frames():
    ...     print('{0.shape} points in this frame'.format(points))
    '''

    def __init__(self, handle):
        '''Initialize this C3D file by reading header and parameter data.

        Parameters
        ----------
        handle : file handle
            Read metadata and C3D motion frames from the given file handle. This
            handle is assumed to be `seek`-able and `read`-able. The handle must
            remain open for the life of the `Reader` instance. The `Reader` does
            not `close` the handle.

        Raises
        ------
        ValueError
            If the processor metadata in the C3D file is anything other than 84
            (Intel format).
        '''
        super(Reader, self).__init__(Header(handle))
        self._handle = handle
        self._handle.seek((self.header.parameter_block - 1) * 512)
        # metadata header: four single-byte fields; only the block count and
        # processor type are used.
        buf = self._handle.read(4)
        _, _, parameter_blocks, processor = struct.unpack('BBBB', buf)
        if processor != PROCESSOR_INTEL:
            raise ValueError(
                'we only read Intel C3D files (got processor {})'.
                format(processor))
        # read all parameter blocks as a single chunk to avoid block
        # boundary issues. (renamed from "bytes" to avoid shadowing the
        # builtin.)
        param_data = self._handle.read(512 * parameter_blocks - 4)
        while param_data:
            buf = io.BytesIO(param_data)
            chars_in_name, group_id = struct.unpack('bb', buf.read(2))
            if group_id == 0 or chars_in_name == 0:
                # we've reached the end of the parameter section.
                break
            name = buf.read(abs(chars_in_name)).decode('utf-8').upper()
            offset_to_next, = struct.unpack('<h', buf.read(2))
            if group_id > 0:
                # positive id: this record is a parameter. if its group
                # doesn't exist, create a blank one. add the parameter to
                # the group.
                self.groups.setdefault(group_id, Group()).add_param(name, handle=buf)
            else:
                # negative id: this record is a group. if a group with the
                # appropriate id exists already (because we've already
                # created it for a parameter), just set the name of the
                # group. otherwise, add a new group.
                group_id = abs(group_id)
                size, = struct.unpack('B', buf.read(1))
                # NOTE(review): when size > 0 this leaves desc as raw bytes
                # (not decoded) -- verify downstream consumers expect that.
                desc = size and buf.read(size) or ''
                group = self.get(group_id)
                if group is not None:
                    group.name = name
                    group.desc = desc
                    self.groups[name] = group
                else:
                    self.add_group(group_id, name, desc)
            # advance past this record to the next parameter/group record.
            param_data = param_data[2 + abs(chars_in_name) + offset_to_next:]
        self.check_metadata()

    def read_frames(self, copy=True):
        '''Iterate over the data frames from our C3D file handle.

        Parameters
        ----------
        copy : bool
            If False, the reader returns a reference to the same data buffers
            for every frame. The default is True, which causes the reader to
            return a unique data buffer for each frame. Set this to False if you
            consume frames as you iterate over them, or True if you store them
            for later.

        Returns
        -------
        frames : sequence of (frame number, points, analog)
            This method generates a sequence of (frame number, points, analog)
            tuples, one tuple per frame. The first element of each tuple is the
            frame number. The second is a numpy array of parsed, 5D point data
            and the third element of each tuple is a numpy array of analog
            values that were recorded during the frame. (Often the analog data
            are sampled at a higher frequency than the 3D point data, resulting
            in multiple analog frames per frame of point data.)

            The first three columns in the returned point data are the (x, y, z)
            coordinates of the observed motion capture point. The fourth column
            is an estimate of the error for this particular point, and the fifth
            column is the number of cameras that observed the point in question.
            Both the fourth and fifth values are -1 if the point is considered
            to be invalid.
        '''
        # A negative POINT:SCALE marks floating-point storage.
        scale = abs(self.point_scale)
        is_float = self.point_scale < 0
        point_bytes = [2, 4][is_float]
        point_dtype = [np.int16, np.float32][is_float]
        point_scale = [scale, 1][is_float]
        points = np.zeros((self.point_used, 5), float)
        # TODO: handle ANALOG:BITS parameter here!
        p = self.get('ANALOG:FORMAT')
        analog_unsigned = p and p.string_value.strip().upper() == 'UNSIGNED'
        analog_dtype = np.int16
        analog_bytes = 2
        if is_float:
            analog_dtype = np.float32
            analog_bytes = 4
        elif analog_unsigned:
            analog_dtype = np.uint16
            analog_bytes = 2
        analog = np.array([], float)
        # Per-channel offsets and scales for converting raw analog samples.
        offsets = np.zeros((self.analog_used, 1), int)
        param = self.get('ANALOG:OFFSET')
        if param is not None:
            offsets = param.int16_array[:self.analog_used, None]
        scales = np.ones((self.analog_used, 1), float)
        param = self.get('ANALOG:SCALE')
        if param is not None:
            scales = param.float_array[:self.analog_used, None]
        gen_scale = 1.
        param = self.get('ANALOG:GEN_SCALE')
        if param is not None:
            gen_scale = param.float_value
        self._handle.seek((self.header.data_block - 1) * 512)
        for frame_no in range(self.first_frame(), self.last_frame() + 1):
            n = 4 * self.header.point_count
            # np.frombuffer replaces the long-deprecated np.fromstring (removed
            # in modern NumPy). The resulting array is read-only, which is fine
            # because `raw` is only read below.
            raw = np.frombuffer(self._handle.read(n * point_bytes),
                                dtype=point_dtype,
                                count=n).reshape((self.point_used, 4))
            points[:, :3] = raw[:, :3] * point_scale
            valid = raw[:, 3] > -1
            points[~valid, 3:5] = -1
            c = raw[valid, 3].astype(np.uint16)
            # fourth value is floating-point (scaled) error estimate
            points[valid, 3] = (c & 0xff).astype(float) * scale
            # fifth value is number of bits set in camera-observation byte
            points[valid, 4] = sum((c & (1 << k)) >> k for k in range(8, 17))
            if self.header.analog_count > 0:
                n = self.header.analog_count
                raw = np.frombuffer(self._handle.read(n * analog_bytes),
                                    dtype=analog_dtype,
                                    count=n).reshape((self.analog_used, -1))
                analog = (raw.astype(float) - offsets) * scales * gen_scale
            if copy:
                yield frame_no, points.copy(), analog.copy()
            else:
                yield frame_no, points, analog
class Writer(Manager):
    '''This class writes metadata and frames to a C3D file.

    For example, to read an existing C3D file, apply some sort of data
    processing to the frames, and write out another C3D file::

    >>> r = c3d.Reader(open('data.c3d', 'rb'))
    >>> w = c3d.Writer()
    >>> w.add_frames(process_frames_somehow(r.read_frames()))
    >>> with open('smoothed.c3d', 'wb') as handle:
    >>>     w.write(handle)

    Parameters
    ----------
    point_rate : float, optional
        The frame rate of the data. Defaults to 1200.
    analog_rate : float, optional
        The number of analog samples per frame. Defaults to 0.
    point_scale : float, optional
        The scale factor for point data. Defaults to -1 (i.e., "check the
        POINT:SCALE parameter").
    point_units : str, optional
        The units that the point numbers represent. Defaults to ``'mm '``.
    gen_scale : float, optional
        General scaling factor for data. Defaults to 1.
    '''

    def __init__(self,
                 point_rate=1200.,
                 analog_rate=0.,
                 point_scale=-1.,
                 point_units='mm ',
                 gen_scale=1.):
        '''Set metadata for this writer.
        '''
        super(Writer, self).__init__()
        self._point_rate = point_rate
        self._analog_rate = analog_rate
        self._point_scale = point_scale
        self._point_units = point_units
        self._gen_scale = gen_scale
        self._frames = []

    def add_frames(self, frames):
        '''Add frames to this writer instance.

        Parameters
        ----------
        frames : sequence of (point, analog) tuples
            A sequence of frame data to add to the writer.
        '''
        self._frames.extend(frames)

    def _pad_block(self, handle):
        '''Pad the file with 0s to the end of the next block boundary.'''
        extra = handle.tell() % 512
        if extra:
            handle.write(b'\x00' * (512 - extra))

    def _write_metadata(self, handle):
        '''Write metadata to a file handle.

        Parameters
        ----------
        handle : file
            Write metadata and C3D motion frames to the given file handle. The
            writer does not close the handle.
        '''
        self.check_metadata()
        # header occupies the first 512-byte block
        self.header.write(handle)
        self._pad_block(handle)
        assert handle.tell() == 512
        # groups: four-byte parameter-section header, then each group keyed
        # by its numeric id (groups are stored under both id and name; only
        # the id entries are written, so each group is written once).
        handle.write(struct.pack(
            'BBBB', 0, 0, self.parameter_blocks(), PROCESSOR_INTEL))
        id_groups = sorted(
            (i, g) for i, g in self.groups.items() if isinstance(i, int))
        for group_id, group in id_groups:
            group.write(group_id, handle)
        # zero-pad up to the data block announced in the header
        self._pad_block(handle)
        while handle.tell() != 512 * (self.header.data_block - 1):
            handle.write(b'\x00' * 512)

    def _write_frames(self, handle):
        '''Write our frame data to the given file handle.

        Parameters
        ----------
        handle : file
            Write metadata and C3D motion frames to the given file handle. The
            writer does not close the handle.
        '''
        assert handle.tell() == 512 * (self.header.data_block - 1)
        scale = abs(self.point_scale)
        is_float = self.point_scale < 0
        point_dtype = [np.int16, np.float32][is_float]
        point_scale = [scale, 1][is_float]
        raw = np.empty((self.point_used, 4), point_dtype)
        for points in self._frames:
            valid = points[:, 3] > -1
            raw[~valid, 3] = -1
            # NOTE(review): divides by self._point_scale (which defaults to
            # -1) rather than the local `scale`/`point_scale` -- verify this
            # is intentional before changing.
            raw[valid, :3] = points[valid, :3] / self._point_scale
            raw[valid, 3] = (
                ((points[valid, 4]).astype(np.uint8) << 8) |
                (points[valid, 3] / scale).astype(np.uint16)
            )
            # 'f' (not b'f'): array typecodes must be str; b'f' raises a
            # TypeError on Python 3.
            point = array.array('f')
            point.extend(raw.flatten())
            point.tofile(handle)
            #analog = array.array(point_format)
            #analog.extend(analog)
            #analog.tofile(handle)
        self._pad_block(handle)

    def write(self, handle, labels):
        '''Write metadata and point + analog frames to a file handle.

        Parameters
        ----------
        handle : file
            Write metadata and C3D motion frames to the given file handle. The
            writer does not close the handle.
        labels : sequence of str
            One label per 3D point; joined and stored in POINT:LABELS.
        '''
        if not self._frames:
            return

        def add(name, desc, bpe, fmt, value, *dimensions):
            # pack a single numeric parameter into the current group
            group.add_param(name,
                            desc=desc,
                            bytes_per_element=bpe,
                            bytes=struct.pack(fmt, value),
                            dimensions=list(dimensions))

        def add_str(name, desc, text, *dimensions):
            # store a string parameter (utf-8 encoded) in the current group
            group.add_param(name,
                            desc=desc,
                            bytes_per_element=-1,
                            bytes=text.encode('utf-8'),
                            dimensions=list(dimensions))

        def add_empty_array(name, desc, bpe):
            group.add_param(name, desc=desc, bytes_per_element=bpe,
                            dimensions=[0])

        points = self._frames[0]
        ppf = len(points)
        # (removed leftover Python 2 debug `print` statements here)
        # POINT group
        group = self.add_group(1, 'POINT', 'POINT group')
        add('USED', 'Number of 3d markers', 2, '<H', ppf)
        add('FRAMES', 'frame count', 2, '<H', min(65535, len(self._frames)))
        add('DATA_START', 'data block number', 2, '<H', 0)
        add('SCALE', '3d scale factor', 4, '<f', self._point_scale)
        add('RATE', '3d data capture rate', 4, '<f', self._point_rate)
        add_str('X_SCREEN', 'X_SCREEN parameter', '+X', 2)
        add_str('Y_SCREEN', 'Y_SCREEN parameter', '+Y', 2)
        add_str('UNITS', '3d data units', self._point_units, len(self._point_units))
        add_str('LABELS', 'labels', ' '.join(labels)+' ', 9, ppf)
        add_str('DESCRIPTIONS', 'descriptions', ' ' * 16 * ppf, 16, ppf)
        # ANALOG group
        #group = self.add_group(2, 'ANALOG', 'ANALOG group')
        #add('USED', 'analog channel count', 2, '<H', analog.shape[0])
        #add('RATE', 'analog samples per 3d frame', 4, '<f', analog.shape[1])
        #add('GEN_SCALE', 'analog general scale factor', 4, '<f', self._gen_scale)
        #add_empty_array('SCALE', 'analog channel scale factors', 4)
        #add_empty_array('OFFSET', 'analog channel offsets', 2)
        # TRIAL group
        group = self.add_group(3, 'TRIAL', 'TRIAL group')
        add('ACTUAL_START_FIELD', 'actual start frame', 2, '<I', 1, 2)
        add('ACTUAL_END_FIELD', 'actual end frame', 2, '<I', len(self._frames), 2)
        # sync parameter information to header.
        blocks = self.parameter_blocks()
        self.get('POINT:DATA_START').bytes = struct.pack('<H', 2 + blocks)
        self.header.data_block = 2 + blocks
        self.header.frame_rate = self._point_rate
        self.header.last_frame = min(len(self._frames), 65535)
        self.header.point_count = ppf
        self.header.analog_count = 0
        self.header.analog_per_frame = 0
        self.header.scale_factor = self._point_scale
        self._write_metadata(handle)
        self._write_frames(handle)
| {
"repo_name": "elggem/iemocap-mapper",
"path": "c3d-conversion/c3d.py",
"copies": "1",
"size": "38454",
"license": "unlicense",
"hash": 4244636545081582000,
"line_mean": 34.2465627864,
"line_max": 91,
"alpha_frac": 0.5621261767,
"autogenerated": false,
"ratio": 4.119335832886985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006597669403497388,
"num_lines": 1091
} |
"""A Python module for talking to an Arduino over a serial port."""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='arduinoserial',
# Versions should comply with PEP440. For a discussion on
# single-sourcing the version across setup.py and the project
# code, see https://packaging.python.org/en/latest/single_source_version.html
version='1.0.1',
description='A Python interface to Arduino serial ports.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/wiseman/arduino-serial',
# Author details
author='John Wiseman',
author_email='jjwiseman@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='nlp nlu naturallanguage text classification development',
py_modules=['arduinoserial'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['pytest', 'check-manifest', 'zest.releaser']
}
)
| {
"repo_name": "wiseman/arduino-serial",
"path": "setup.py",
"copies": "1",
"size": "2749",
"license": "mit",
"hash": 2984274756268248600,
"line_mean": 33.3625,
"line_max": 81,
"alpha_frac": 0.6584212441,
"autogenerated": false,
"ratio": 4.12143928035982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5279860524459821,
"avg_score": null,
"num_lines": null
} |
"""A Python module that provides the API client component for the elsapy package.
Additional resources:
* https://github.com/ElsevierDev/elsapy
* https://dev.elsevier.com
* https://api.elsevier.com"""
import requests, json, time
from . import log_util
from .__init__ import version
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
logger = log_util.get_logger(__name__)
class ElsClient:
    """A class that implements a Python interface to api.elsevier.com"""

    # class variables
    __url_base = "https://api.elsevier.com/"    ## Base URL for later use
    __user_agent = "elsapy-v%s" % version       ## Helps track library use
    __min_req_interval = 1                      ## Min. request interval in sec
    __ts_last_req = time.time()                 ## Tracker for throttling

    # constructors
    def __init__(self, api_key, inst_token = None, num_res = 25, local_dir = None):
        # TODO: make num_res configurable for searches and documents/authors view
        #  - see https://github.com/ElsevierDev/elsapy/issues/32
        """Initializes a client with a given API Key and, optionally, institutional
            token, number of results per request, and local data path."""
        self.api_key = api_key
        self.inst_token = inst_token
        self.num_res = num_res
        # Fall back to ./data when no local directory was supplied, and make
        # sure the directory exists on disk.
        self.local_dir = local_dir if local_dir else pathlib.Path.cwd() / 'data'
        if not self.local_dir.exists():
            self.local_dir.mkdir()

    # properties
    @property
    def api_key(self):
        """Get the apiKey for the client instance"""
        return self._api_key

    @api_key.setter
    def api_key(self, api_key):
        """Set the apiKey for the client instance"""
        self._api_key = api_key

    @property
    def inst_token(self):
        """Get the instToken for the client instance"""
        return self._inst_token

    @inst_token.setter
    def inst_token(self, inst_token):
        """Set the instToken for the client instance"""
        self._inst_token = inst_token

    @property
    def num_res(self):
        """Gets the max. number of results to be used by the client instance"""
        return self._num_res

    @num_res.setter
    def num_res(self, numRes):
        """Sets the max. number of results to be used by the client instance"""
        self._num_res = numRes

    @property
    def local_dir(self):
        """Gets the currently configured local path to write data to."""
        return self._local_dir

    @property
    def req_status(self):
        '''Return the status of the request response, '''
        return {'status_code': self._status_code, 'status_msg': self._status_msg}

    @local_dir.setter
    def local_dir(self, path_str):
        """Sets the local path to write data to."""
        self._local_dir = pathlib.Path(path_str)

    # access functions
    def getBaseURL(self):
        """Returns the ELSAPI base URL currently configured for the client"""
        return self.__url_base

    # request/response execution functions
    def exec_request(self, URL):
        """Sends the actual request; returns response."""
        ## Throttle request, if need be
        remaining = self.__min_req_interval - (time.time() - self.__ts_last_req)
        if remaining > 0:
            time.sleep(remaining)
        ## Construct and execute request
        headers = {
            "X-ELS-APIKey"  : self.api_key,
            "User-Agent"    : self.__user_agent,
            "Accept"        : 'application/json'
        }
        if self.inst_token:
            headers["X-ELS-Insttoken"] = self.inst_token
        logger.info('Sending GET request to ' + URL)
        response = requests.get(URL, headers=headers)
        self.__ts_last_req = time.time()
        self._status_code = response.status_code
        if response.status_code == 200:
            # Success: record status and hand back the decoded JSON payload.
            self._status_msg = 'data retrieved'
            return json.loads(response.text)
        # Failure: record a status message, then raise with the same detail.
        self._status_msg = ("HTTP " + str(response.status_code) + " Error from " + URL
                            + " and using headers " + str(headers) + ": " + response.text)
        raise requests.HTTPError("HTTP " + str(response.status_code) + " Error from " + URL
                                 + "\nand using headers " + str(headers) + ":\n" + response.text)
| {
"repo_name": "ElsevierDev/elsapy",
"path": "elsapy/elsclient.py",
"copies": "1",
"size": "4481",
"license": "bsd-3-clause",
"hash": -2641787465012672000,
"line_mean": 35.0330578512,
"line_max": 146,
"alpha_frac": 0.5753180094,
"autogenerated": false,
"ratio": 3.944542253521127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019860262921128,
"avg_score": null,
"num_lines": null
} |
"""A Python module that uses PyGame to bring Scratch-like features to Python.
https://github.com/PySlither/Slither
"""
# Always prefer setuptools over distutils
from setuptools import setup#, find_packages
# To use a consistent encoding
#from codecs import open
from os import path
import glob
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
#with open(path.join(here, 'README.md'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='slither',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.3.0',
description='A Python module that uses PyGame to bring Scratch-like features to Python',
long_description='A Python module that uses PyGame to bring Scratch-like features to Python. PyGame IS required for this module to work.',
# The project's main homepage.
url='https://github.com/PySlither/Slither',
# Author details
author='The PySlither Devs',
author_email='none',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Other Audience',
'Topic :: Games/Entertainment',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='scratch pygame',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=["slither", "slither.examples"],
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'slither': ['snakey.png'],
'slither.examples': list(map(path.basename, glob.glob("slither/examples/*.png") + glob.glob("slither/examples/*.wav")))
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
)
| {
"repo_name": "PySlither/Slither",
"path": "setup.py",
"copies": "1",
"size": "3854",
"license": "mit",
"hash": -8584131002816909000,
"line_mean": 35.7047619048,
"line_max": 142,
"alpha_frac": 0.665282823,
"autogenerated": false,
"ratio": 3.977296181630547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142579004630546,
"avg_score": null,
"num_lines": null
} |
"""A Python package for accessing hnbex.eu service"""
import json
import re
import requests
from datetime import datetime
from decimal import Decimal
class Rate(object):
    '''A single day's HNB exchange rate for one currency.

    Wraps one record from the hnbex.eu daily-rates API and offers
    conversion helpers between HRK and the foreign currency.
    '''

    # Validation patterns for the string constructor arguments.
    # NOTE(review): the '.' before the six decimals is unescaped, so it
    # matches any character -- left unchanged to preserve accepted inputs.
    RE_VALIDATION_DICT = {
        'currency_code': r'^[A-Z]{3}$',
        'buying_rate': r'^[0-9]+.[0-9]{6}$',
        'selling_rate': r'^[0-9]+.[0-9]{6}$',
        'median_rate': r'^[0-9]+.[0-9]{6}$',
    }

    URL = 'http://hnbex.eu/api/v1/rates/daily/'

    def __init__(self, currency_code, rate_date, buying_rate, selling_rate,
                 median_rate, unit_value):
        '''Validate the raw API fields and store them as typed attributes.

        :param currency_code: three-letter uppercase ISO code, e.g. 'EUR'
        :param rate_date: datetime.date the rate applies to
        :param buying_rate: string decimal with six fraction digits
        :param selling_rate: string decimal with six fraction digits
        :param median_rate: string decimal with six fraction digits
        :param unit_value: 1 or 100 (units of currency the rate refers to)
        :raises ValueError: if any field fails validation
        '''
        self._validate(currency_code, rate_date, buying_rate, selling_rate,
                       median_rate, unit_value)
        self.code = currency_code
        self.date = rate_date
        self.buy = Decimal(buying_rate)
        self.sell = Decimal(selling_rate)
        self.median = Decimal(median_rate)
        self.unit_value = unit_value

    def _validate(self, currency_code, rate_date, buying_rate, selling_rate,
                  median_rate, unit_value):
        """
        Raise ValueError if params do not match regex as defined in
        RE_VALIDATION_DICT or if unit_value is not in allowed set.
        """
        # Look each value up in an explicit dict rather than eval()-ing the
        # variable name: eval is a needless code-injection hazard here.
        values = {
            'currency_code': currency_code,
            'buying_rate': buying_rate,
            'selling_rate': selling_rate,
            'median_rate': median_rate,
        }
        for var, regex in self.RE_VALIDATION_DICT.items():
            if re.match(regex, values[var]) is None:
                raise ValueError('{}: wrong format'.format(var))
        if unit_value not in [1, 100]:
            raise ValueError('unit_value must be 1 or 100')

    def from_hrk(self, amount_hrk):
        """
        Convert amount in HRK to amount in given currency.

        :param amount_hrk: int, float, Decimal or string representation of
            Decimal
        :return: Decimal
        """
        amount = Decimal(amount_hrk) / self.median * self.unit_value
        return amount.quantize(Decimal('0.000001'))

    def to_hrk(self, amount):
        """
        Convert amount in given currency to HRK.

        :param amount: int, float, Decimal or string representation of Decimal
        :return: Decimal
        """
        amount_hrk = Decimal(amount) * self.median / self.unit_value
        return amount_hrk.quantize(Decimal('0.000001'))

    @classmethod
    def get_rates(cls, rate_date=None):
        """
        Wrap call to 'http://hnbex.eu/api/v1/rates/daily/'. Create dictionary
        where keys are currency codes and values are `Rate` objects.

        If api returns status code 4xx or 5xx raise ValueError or IOError.

        :param rate_date: datetime.date (defaults to today)
        :return: dict
        """
        if rate_date is None:
            rate_date = datetime.today()
        date_str = rate_date.strftime('%Y-%m-%d')
        result = requests.get(url=cls.URL, params={'date': date_str})
        if result.ok:
            rates = {}
            content = json.loads(result.content)
            for rate_dict in content:
                rate_dict['rate_date'] = rate_date
                try:
                    rates[rate_dict['currency_code']] = cls(**rate_dict)
                except (ValueError, KeyError):
                    # skip malformed records rather than failing the batch
                    continue
            return rates
        if 400 <= result.status_code < 500:
            raise ValueError("Server returned status code {}"
                             "".format(result.status_code))
        if 500 <= result.status_code < 600:
            raise IOError("Server returned status code {}"
                          "".format(result.status_code))
| {
"repo_name": "dobarkod/hnbex-python",
"path": "hnbex/__init__.py",
"copies": "1",
"size": "3414",
"license": "mit",
"hash": 6467871535538348000,
"line_mean": 34.1958762887,
"line_max": 78,
"alpha_frac": 0.5679554774,
"autogenerated": false,
"ratio": 3.960556844547564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 97
} |
"""A Python package for analysing and manipulating OOMMF vector fields.
This module is a Python package that provides:
- Opening OOMMF vector field files (.omf and .ohf)
- Analysing vector fields, such as sampling, averaging, plotting, etc.
- Saving arbitrary vector fields to OOMMF vector field files.
oommffield is a member of JOOMMF project - a part of OpenDreamKit
Horizon 2020 European Research Infrastructure project
"""
import random
import numpy as np
import matplotlib.pyplot as plt
import finitedifferencefield
import struct
class Field(finitedifferencefield.Field):
    """Finite-difference vector field writable as an OOMMF OVF 2.0 file."""

    def write_oommf_file(self, filename, datatype='text'):
        """Write the FD field to the OOMMF (omf, ohf) file.

        This method writes all necessary data to the omf or ohf file,
        so that it can be read by OOMMF.

        Args:
            filename (str): filename including extension
            datatype (str): Either 'text' or 'binary'

        Raises:
            ValueError: if datatype is neither 'text' nor 'binary'.

        Example:
          .. code-block:: python

            >>> from oommffield import Field
            >>> field = Field((0, 0, 0), (5, 4, 3), (1, 1, 1))
            >>> field.set((1, 0, 5))
            >>> field.write_oommf_file('fdfield.omf')
        """
        # Fail early on an unsupported datatype.  The original code fell
        # through with footer_lines undefined and raised a confusing
        # NameError only after the header was already written.
        if datatype not in ('text', 'binary'):
            raise ValueError("datatype must be 'text' or 'binary', "
                             "got {!r}".format(datatype))
        oommf_file = open(filename, 'w')
        # Define header lines.  Each is emitted prefixed with '# ' (bare '#'
        # for empty lines) as the OVF format requires.
        header_lines = ['OOMMF OVF 2.0',
                        '',
                        'Segment count: 1',
                        '',
                        'Begin: Segment',
                        'Begin: Header',
                        '',
                        'Title: Field generated omf file',
                        'Desc: File generated by Field class',
                        'meshunit: m',
                        'meshtype: rectangular',
                        'xbase: {}'.format(self.d[0]),
                        'ybase: {}'.format(self.d[1]),
                        'zbase: {}'.format(self.d[2]),
                        'xnodes: {}'.format(self.n[0]),
                        'ynodes: {}'.format(self.n[1]),
                        'znodes: {}'.format(self.n[2]),
                        'xstepsize: {}'.format(self.d[0]),
                        'ystepsize: {}'.format(self.d[1]),
                        'zstepsize: {}'.format(self.d[2]),
                        'xmin: {}'.format(self.cmin[0]),
                        'ymin: {}'.format(self.cmin[1]),
                        'zmin: {}'.format(self.cmin[2]),
                        'xmax: {}'.format(self.cmax[0]),
                        'ymax: {}'.format(self.cmax[1]),
                        'zmax: {}'.format(self.cmax[2]),
                        'valuedim: {}'.format(self.dim),
                        'valuelabels: Magnetization_x Magnetization_y Magnetization_z',
                        'valueunits: A/m A/m A/m',
                        '',
                        'End: Header',
                        '']
        if datatype == 'binary':
            header_lines.append('Begin: Data Binary 8')
            footer_lines = ['End: Data Binary 8',
                            'End: Segment']
        else:
            header_lines.append('Begin: Data Text')
            footer_lines = ['End: Data Text',
                            'End: Segment']
        # Write header lines to OOMMF file.
        for line in header_lines:
            if line == '':
                oommf_file.write('#\n')
            else:
                oommf_file.write('# ' + line + '\n')
        if datatype == 'binary':
            # Close the text handle and reopen in binary append mode so the
            # raw doubles land directly after the header.
            oommf_file.close()
            oommf_file = open(filename, 'ab')
            # The 8-byte binary check value OOMMF expects before the data.
            packarray = [123456789012345.0]
            # Data ordering: x varies fastest, then y, then z.
            for iz in range(self.n[2]):
                for iy in range(self.n[1]):
                    for ix in range(self.n[0]):
                        packarray.extend(self.f[ix, iy, iz, :])
            v_binary = struct.pack('d'*len(packarray), *packarray)
            oommf_file.write(v_binary)
            oommf_file.close()
            # Reopen in text append mode for the footer.
            oommf_file = open(filename, 'a')
        else:
            for iz in range(self.n[2]):
                for iy in range(self.n[1]):
                    for ix in range(self.n[0]):
                        v = [str(vi) for vi in self.f[ix, iy, iz, :]]
                        for vi in v:
                            oommf_file.write(' ' + vi)
                        oommf_file.write('\n')
        # Write footer lines to OOMMF file.
        for line in footer_lines:
            oommf_file.write('# ' + line + '\n')
        # Close the file.
        oommf_file.close()
def read_oommf_file(filename, name='unnamed'):
    """Read an OOMMF (omf/ohf) file and return a Field object.

    Dispatches to the text or binary reader by looking for the
    'Begin: Data Text' marker; a file that cannot even be decoded as text
    is treated as binary.

    Args:
        filename (str): OOMMF file name
        name (str): name of the Field object

    Return:
      Field object.
    """
    try:
        # Context manager closes the handle deterministically; the original
        # opened the file and never closed it.
        with open(filename) as f:
            is_text = 'Begin: Data Text' in f.read()
    except UnicodeDecodeError:
        # Undecodable bytes -> certainly a binary OVF file.
        return read_oommf_file_binary(filename, name)
    if is_text:
        return read_oommf_file_text(filename, name)
    return read_oommf_file_binary(filename, name)
def read_oommf_file_text(filename, name='unnamed'):
    """Read a text-format OOMMF file and create a Field object.

    Args:
        filename (str): OOMMF file name
        name (str): name of the Field object

    Return:
      Field object.

    Example:
      .. code-block:: python

        from oommffield import read_oommf_file
        oommf_filename = 'vector_field.omf'
        field = read_oommf_file(oommf_filename, name='magnetisation')
    """
    # Open and read the file.
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    # Load metadata.  Only the first 50 lines are scanned -- the OVF header
    # is assumed to fit there.
    dic = {'xmin': None, 'ymin': None, 'zmin': None,
           'xmax': None, 'ymax': None, 'zmax': None,
           'xstepsize': None, 'ystepsize': None, 'zstepsize': None,
           'xbase': None, 'ybase': None, 'zbase': None,
           'xnodes': None, 'ynodes': None, 'znodes': None,
           'valuedim': None}
    for line in lines[0:50]:
        for key in dic.keys():
            if line.find(key) != -1:
                # Header lines look like '# key: value'; token [2] is value.
                dic[key] = float(line.split()[2])
    cmin = (dic['xmin'], dic['ymin'], dic['zmin'])
    cmax = (dic['xmax'], dic['ymax'], dic['zmax'])
    d = (dic['xstepsize'], dic['ystepsize'], dic['zstepsize'])
    # NOTE(review): cbase is computed but never used.
    cbase = (dic['xbase'], dic['ybase'], dic['zbase'])
    n = (int(round(dic['xnodes'])),
         int(round(dic['ynodes'])),
         int(round(dic['znodes'])))
    dim = int(dic['valuedim'])
    field = Field(cmin, cmax, d, dim, name=name)
    # Locate the first data line; raises NameError below if the marker is
    # absent (assumes a well-formed text OVF file).
    for j in range(len(lines)):
        if lines[j].find('Begin: Data Text') != -1:
            data_first_line = j+1
    counter = 0
    # One whitespace-separated vector per line, x fastest, then y, then z.
    for iz in range(n[2]):
        for iy in range(n[1]):
            for ix in range(n[0]):
                i = (ix, iy, iz)
                line_data = lines[data_first_line+counter]
                value = [float(vi) for vi in line_data.split()]
                field.set_at_index(i, value)
                counter += 1
    return field
def read_oommf_file_binary(filename, name='unnamed'):
    """Read a binary-format OOMMF file and create a Field object.

    Args:
        filename (str): OOMMF file name
        name (str): name of the Field object

    Return:
      Field object.

    Raises:
        AssertionError: if the OVF check value at the start of the binary
            data does not match, or the data size is neither 4 nor 8 bytes.

    Example:
      .. code-block:: python

        from oommffield import read_oommf_file
        oommf_filename = 'vector_field.omf'
        field = read_oommf_file(oommf_filename, name='magnetisation')
    """
    # Open and read the file.
    with open(filename, 'rb') as f:
        content = f.read()
    lines = content.split(b'\n')
    # Load metadata from the (ASCII) header.
    dic = {'xmin': None, 'ymin': None, 'zmin': None,
           'xmax': None, 'ymax': None, 'zmax': None,
           'xstepsize': None, 'ystepsize': None, 'zstepsize': None,
           'xbase': None, 'ybase': None, 'zbase': None,
           'xnodes': None, 'ynodes': None, 'znodes': None,
           'valuedim': None}
    for line in lines[0:50]:
        for key in dic.keys():
            if line.find(bytes(key, 'utf-8')) != -1:
                dic[key] = float(line.split()[2])
    cmin = (dic['xmin'], dic['ymin'], dic['zmin'])
    cmax = (dic['xmax'], dic['ymax'], dic['zmax'])
    d = (dic['xstepsize'], dic['ystepsize'], dic['zstepsize'])
    n = (int(round(dic['xnodes'])),
         int(round(dic['ynodes'])),
         int(round(dic['znodes'])))
    dim = int(dic['valuedim'])
    field = Field(cmin, cmax, d, dim, name=name)
    binary_header = b'# Begin: Data Binary '
    # Find the start and end points of the binary data, in byte positions.
    data_start = content.find(binary_header)
    header = content[data_start:data_start + len(binary_header) + 1]
    if b'8' in header:
        bytesize = 8
    elif b'4' in header:
        bytesize = 4
    else:
        # Previously this fell through to an UnboundLocalError.
        raise AssertionError('Unknown binary data size in OOMMF file header')
    # '... Binary 4\n' and '... Binary 8\n' have the same length, so this
    # offset is correct for both sizes.
    data_start += len(b'# Begin: Data Binary 8\n')
    data_end = content.find(b'# End: Data Binary ')
    # struct.iter_unpack yields 1-tuples; index [0][0] extracts the first
    # value.  The first value is OOMMF's byte-order check constant.
    # (The original compared the tuple itself for the 4-byte case, which
    # always failed.)
    if bytesize == 4:
        listdata = list(struct.iter_unpack('@f', content[data_start:data_end]))
        if listdata[0][0] != 1234567.0:
            raise AssertionError('Something has gone wrong'
                                 ' with reading Binary Data')
    else:
        listdata = list(struct.iter_unpack('@d', content[data_start:data_end]))
        if listdata[0][0] != 123456789012345.0:
            raise AssertionError('Something has gone wrong'
                                 ' with reading Binary Data')
    # Skip the check value, then read three components per mesh point,
    # x fastest, then y, then z.
    counter = 1
    for iz in range(n[2]):
        for iy in range(n[1]):
            for ix in range(n[0]):
                i = (ix, iy, iz)
                value = (listdata[counter][0],
                         listdata[counter+1][0],
                         listdata[counter+2][0])
                field.set_at_index(i, value)
                counter += 3
    return field
| {
"repo_name": "joommf/oommffield",
"path": "oommffield/oommffield.py",
"copies": "1",
"size": "9924",
"license": "bsd-2-clause",
"hash": 6574709888744015000,
"line_mean": 33.2206896552,
"line_max": 87,
"alpha_frac": 0.4996977025,
"autogenerated": false,
"ratio": 3.6769173768062244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9673932417140804,
"avg_score": 0.0005365324330841571,
"num_lines": 290
} |
"""A python package for time series classification."""
import pyts
from setuptools import find_packages, setup
DISTNAME = 'pyts'
DESCRIPTION = 'A python package for time series classification'
with open('README.md') as f:
LONG_DESCRIPTION = f.read()
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
MAINTAINER = 'Johann Faouzi'
MAINTAINER_EMAIL = 'johann.faouzi@gmail.com'
URL = 'https://github.com/johannfaouzi/pyts'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/johannfaouzi/pyts'
VERSION = pyts.__version__
INSTALL_REQUIRES = ['numpy>=1.17.5',
'scipy>=1.3.0',
'scikit-learn>=0.22.1',
'joblib>=0.12',
'numba>=0.48.0']
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9']
EXTRAS_REQUIRE = {
'tests': [
'pytest',
'pytest-cov'],
'docs': [
'docutils==0.14',
'sphinx==1.8.5',
'sphinx-gallery',
'numpydoc',
'matplotlib'
]
}
PACKAGE_DATA = {
'pyts': ['datasets/cached_datasets/UCR/Coffee/*.txt',
'datasets/cached_datasets/UCR/GunPoint/*.txt',
'datasets/cached_datasets/UCR/PigCVP/*.txt',
'datasets/cached_datasets/UEA/BasicMotions/*.arff',
'datasets/cached_datasets/UEA/BasicMotions/*.txt',
'datasets/info/*.pickle']
}
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
zip_safe=False,
classifiers=CLASSIFIERS,
packages=find_packages(),
package_data=PACKAGE_DATA,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE)
| {
"repo_name": "johannfaouzi/pyts",
"path": "setup.py",
"copies": "1",
"size": "2647",
"license": "bsd-3-clause",
"hash": -3710699966486103000,
"line_mean": 33.7702702703,
"line_max": 66,
"alpha_frac": 0.5678126181,
"autogenerated": false,
"ratio": 3.841799709724238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9909612327824238,
"avg_score": 0,
"num_lines": 74
} |
# A Python port of the C++ port of the NTIA ITS ITM FORTRAN code.
#
# Original FORTRAN code documentation is at:
# http://www.its.bldrdoc.gov/media/50674/itm.pdf
#
# Section numbers referenced here correspond to the FORTRAN code document.
#
# Andrew Clegg
# October 2016
# Last update: Nov 5, 2016
import math
# Static function variables in C++ implemented via global variables in Python
# NOTE: a 'global' statement at module level is a no-op in Python; these
# lines only document which function owns which piece of state.
global wd1, xd1, afo, qk, aht, xht                          # Function adiff
global ad, rr, etq, h0s                                     # Function ascat
global wls                                                  # Function alos
global wlos, wscat, dmin, xae                               # Function lrprop
global kdv                                                  # Function avar
global dexa, de, vmd, vs0, sgl, sgtm, sgtp, sgtd, tgtd      # Function avar
global gm, gp, cv1, cv2, yv1, yv2, yv3, csm1, csm2, ysm1    # Function avar
global ysm2, ysm3, csp1, csp2, ysp1, ysp2, ysp3, csd1, zd   # Function avar
global cfm1, cfm2, cfm3, cfp1, cfp2, cfp3                   # Function avar
global ws, w1                                               # Function avar
# Initialize the static function variables.  Each function's "call with
# d = 0 / lvar = 5 first" protocol (see adiff, ascat, alos, avar) rewrites
# these before they are read.
wd1 = xd1 = afo = qk = aht = xht = 0.
ad = rr = etq = h0s = 0.
wls = 0.
wlos = wscat = False
dmin = xae = 0.
kdv = 0
dexa = de = vmd = vs0 = sgl = sgtm = sgtp = sgtd = tgtd = 0.0
gm = gp = cv1 = cv2 = yv1 = yv2 = yv3 = csm1 = csm2 = ysm1 = 0.0
ysm2 = ysm3 = csp1 = csp2 = ysp1 = ysp2 = ysp3 = csd1 = zd = 0.0
cfm1 = cfm2 = cfm3 = cfp1 = cfp2 = cfp3 = 0.0
ws = w1 = False
class PropType:
    """Primary parameters of the propagation model.

    <Primary parameters 2> (Section 2)
    """

    def __init__(self):
        self.aref = 0.0           # Reference attenuation
        self.dist = 0.0           # Distance
        self.hg = [0.0, 0.0]      # Antenna structural heights, m
        self.wn = 0.0             # Wave number (inverse length), 1/m
        self.dh = 0.0             # Terrain irregularity parameter, m
        self.ens = 0.0            # Surface refractivity, N-units
        self.gme = 0.0            # Earth's effective curvature (inverse length), 1/m
        self.zgndreal = 0.0       # Surface transfer impedance (real part)
        self.zgndimag = 0.0       # Surface transfer impedance (imag part)
        self.he = [0.0, 0.0]      # Antenna effective heights, m
        self.dl = [0.0, 0.0]      # Horizon distances, m
        self.the = [0.0, 0.0]     # Horizon elevation angles
        self.kwx = 0              # Error indicator
        self.mdp = 0              # Controlling mode
class PropvType:
    """Variability parameters of the model.

    <Variability parameters 27> (Section 27)
    """

    def __init__(self):
        self.sgc = 0.0    # Std dev of situation variability (confidence)
        self.lvar = 0     # Control switch
        self.mdvar = 0    # Mode of variability
        self.klim = 0     # Climate indicator
class PropaType:
    """Secondary (derived) parameters of the model.

    <Secondary parameters 3> (Section 3)
    """

    def __init__(self):
        self.dlsa = 0.0           # Line-of-sight distance
        self.dx = 0.0             # Scatter distance
        self.ael = 0.0            # Line-of-sight coefficient
        self.ak1 = 0.0            # Line-of-sight coefficient
        self.ak2 = 0.0            # Line-of-sight coefficient
        self.aed = 0.0            # Diffraction coefficient
        self.emd = 0.0            # Diffraction coefficient
        self.aes = 0.0            # Scatter coefficient
        self.ems = 0.0            # Scatter coefficient
        self.dls = [0.0, 0.0]     # Smooth earth horizon distance
        self.dla = 0.0            # Total horizon distance
        self.tha = 0.0            # Total bending angle
# Integer and double functions 'mymin' replaced by Python's min() function
# Integer and double functions 'mymax' replaced by Python's max() function
def fortran_dim(x, y):
    """Emulate the FORTRAN DIM intrinsic.

    Returns the positive difference: x - y when x exceeds y, else 0.0.
    """
    return x - y if x > y else 0.0
def aknfe(v2):
    """
    Attenuation due to a single knife edge -- the Fresnel integral (in dB)
    as a function of v**2.  The approximation is that given in [Alg 6.1].
    (Section 13)
    """
    if v2 >= 5.76:
        return 12.953 + 4.343 * math.log(v2)
    return 6.02 + 9.11 * math.sqrt(v2) - 1.27 * v2
def fht(x, pk):
    """
    Height-gain over a smooth spherical earth -- used in the "three-radii"
    method.  The approximation is that given in [Alg 6.4].
    (Section 14)
    """
    if x >= 200.0:
        # Large-argument form, blended toward the knife-edge limit for
        # x below 2000.
        result = 0.05751 * x - 4.343 * math.log(x)
        if x < 2000.0:
            blend = 0.0134 * x * math.exp(-0.005 * x)
            result = (1.0 - blend) * result + blend * (17.372 * math.log(x) - 117.0)
        return result
    neg_log_pk = -math.log(pk)
    if pk < 1.e-5 or x * neg_log_pk ** 3.0 > 5495.0:
        result = -117.0
        if x > 1.0:
            result += 17.372 * math.log(x)
        return result
    return 2.5e-5 * x * x / pk - 8.686 * neg_log_pk - 15.0
def h0f(r, et):
    """
    The H01 function for scatter fields as defined in [Alg Section 6].
    (Section 25)

    Args:
        r: normalized distance argument; H01 is evaluated at x = (1/r)**2.
        et: selects (and linearly interpolates between) the five tabulated
            curves; clamped to [1, 5] outside that range.

    Returns:
        H01 value in dB.
    """
    a = [25.0, 80.0, 177.0, 395.0, 705.0]
    b = [24.0, 45.0, 68.0, 80.0, 105.0]
    it = int(et)
    if it <= 0:
        it = 1
        q = 0.0
    elif it >= 5:
        it = 5
        q = 0.0
    else:
        # Fractional part of et -> interpolation weight between the
        # curves numbered it and it+1.
        q = et - it
    x = (1.0/r)**2.0
    h0fv = 4.343*math.log((a[it-1]*x + b[it-1])*x + 1.0)
    # Bug fix: replaced Python 2-only '<>' with '!=' so the module also
    # runs on Python 3; behavior is unchanged.
    if q != 0.0:
        h0fv = (1.0 - q)*h0fv + q*4.343*math.log((a[it]*x + b[it])*x + 1.0)
    return h0fv
def ahd(td):
    """
    The F(theta*d) function for scatter fields: a three-segment logarithmic
    fit, with the segment chosen by the magnitude of theta*d.
    (Section 26)
    """
    # Coefficient triples (constant, linear, log) for
    # td <= 10 km, 10 km < td <= 70 km, and td > 70 km.
    coeffs = ((133.4, 0.332e-3, -4.343),
              (104.6, 0.212e-3, -1.086),
              (71.8, 0.157e-3, 2.171))
    if td <= 10.e3:
        ca, cb, cc = coeffs[0]
    elif td <= 70.e3:
        ca, cb, cc = coeffs[1]
    else:
        ca, cb, cc = coeffs[2]
    return ca + cb*td + cc*math.log(td)
def adiff(d, prop, propa):
    """
    The function adiff finds the "diffraction attenuation" at the distance d. It
    uses a convex combination of smooth earth diffraction and double knife-
    edge diffraction. A call with d = 0 sets up initial constants.
    (Section 10)
    """
    # To implement C++ static function variables.
    # Function must first be called with d = 0 to initialize these variables.
    global wd1, xd1, afo, qk, aht, xht
    prop_zgnd = prop.zgndreal + prop.zgndimag * 1j
    if d == 0:
        # Initialization pass: precompute the combination weight terms
        # (wd1, xd1), the afo term (capped at 15), and the smooth-earth
        # height-gain constants (qk, aht, xht).
        q = prop.hg[0]*prop.hg[1]
        qk = prop.he[0]*prop.he[1] - q
        if prop.mdp < 0.0:
            q += 10.0
        wd1 = (1.0 + qk/q)**0.5
        xd1 = propa.dla + propa.tha/prop.gme
        q = (1.0 - 0.8*math.exp(-propa.dlsa/50e3))*prop.dh
        q *= 0.78*math.exp(- (q/16.)**0.25)
        afo = min(15.0, \
            2.171*math.log(1.0 + 4.77e-4*prop.hg[0]*prop.hg[1]*prop.wn*q))
        qk = 1.0/abs(prop_zgnd)
        aht = 20.0
        xht = 0.0
        for j in range(2):
            # Per-terminal height-gain contribution via fht().
            a = (0.5*prop.dl[j]**2.0)/prop.he[j]
            wa = (a*prop.wn)**(1./3.)
            pk = qk/wa
            q = (1.607 - pk)*151.0*wa*prop.dl[j]/a
            xht += q
            aht += fht(q,pk)
        adiffv = 0.0
    else:
        th = propa.tha + d*prop.gme
        ds = d - propa.dla
        # Double knife-edge part: one aknfe() term per horizon.
        q = 0.0795775*prop.wn*ds*(th**2.0)
        adiffv = aknfe(q*prop.dl[0]/(ds+prop.dl[0])) \
            + aknfe(q*prop.dl[1]/(ds+prop.dl[1]))
        # Rounded-earth part (ar), using the constants set up at d = 0.
        a = ds/th
        wa = (a*prop.wn)**(1./3.)
        pk = qk/wa
        q = (1.607 - pk)*151.0*wa*th+xht
        ar = 0.05751*q - 4.343*math.log(q) - aht
        # Convex combination weight wd between the two parts, plus afo.
        q = (wd1 + xd1/d) \
            *min(((1.0 - 0.8*math.exp(-d/50e3))*prop.dh*prop.wn),6283.2)
        wd = 25.1/(25.1 + q**0.5)
        adiffv = ar*wd+(1.0 - wd)*adiffv + afo
    return adiffv
def ascat(d, prop, propa):
    """
    The function ascat finds the "scatter attenuation" at the distance d. It uses
    an approximation to the methods of NBS Tech Note 101 with checks for inadmissable
    situations. For proper operation, the larger distance (d = d_6) must be the
    first called. A call with d = 0 sets up initial constants.
    (Section 22)
    """
    # To implement C++ static function variables.
    # Function must first be called with d = 0 to initialize.
    global ad, rr, etq, h0s
    prop_zgnd = prop.zgndreal + prop.zgndimag*1j
    if d == 0.0:
        # Initialization pass: asymmetry (ad, rr), climate-dependent etq,
        # and the h0 memory (h0s) used across successive calls.
        ad = prop.dl[0] - prop.dl[1]
        rr = prop.he[1]/prop.he[0]
        if ad < 0.0:
            # Normalize so ad >= 0 and rr is the matching height ratio.
            ad = -ad
            rr = 1.0/rr
        etq = (5.67e-6*prop.ens - 2.32e-3)*prop.ens + 0.031
        h0s = -15.0
        ascatv = 0.0
    else:
        if h0s > 15.0:
            # Reuse the value remembered from the previous (larger) distance.
            h0 = h0s
        else:
            th = prop.the[0] + prop.the[1] + d*prop.gme
            r2 = 2.0*prop.wn*th
            r1 = r2*prop.he[0]
            r2 *= prop.he[1]
            if r1 < 0.2 and r2 < 0.2:
                # Early return: inadmissible geometry; sentinel value 1001
                # is tested by lrprop (a5 < 1000.0).
                return 1001.0
            ss = (d - ad)/(d + ad)
            q = rr/ss
            ss = max(0.1, ss)
            q = min(max(0.1, q), 10.0)
            z0 = (d - ad)*(d + ad)*th*0.25/d
            et=(etq*math.exp(-pow(min(1.7,z0/8.0e3),6.0))+1.0)*z0/1.7556e3
            ett = max(et, 1.0)
            h0 = (h0f(r1, ett) + h0f(r2, ett))*0.5
            h0 += min(h0, (1.38 - math.log(ett))*math.log(ss)*math.log(q)*0.49)
            h0 = fortran_dim(h0, 0.0)
            if et < 1.0:
                h0 = et*h0+(1.0-et)*4.343* \
                    math.log(pow((1.0+1.4142/r1)*(1.0+1.4142/r2),2.0)*(r1+r2)/(r1+r2+2.8284))
            if h0 > 15.0 and h0s >= 0.0:
                h0 = h0s
        h0s = h0
        th = propa.tha+d*prop.gme
        ascatv = ahd(th*d)+4.343*math.log(47.7*prop.wn*pow(th,4.0)) - 0.1 \
            * (prop.ens-301.0)*math.exp(-th*d/40e3) + h0
    return ascatv
def qerfi(q):
    """
    The inverse of qerf -- solves q = Q(x) for x.  The approximation is due
    to C. Hastings, Jr. ("Approximations for digital computers," Princeton
    Univ. Press, 1955) and the maximum error should be 4.5e-4.
    (Section 51)
    """
    # Rational-approximation coefficients: numerator c*, denominator d*.
    c0, c1, c2 = 2.515516698, 0.802853, 0.010328
    d1, d2, d3 = 1.432788, 0.189269, 0.001308
    x = 0.5 - q
    t = max(0.5 - abs(x), 0.000001)
    t = math.sqrt(-2.0 * math.log(t))
    v = t - ((c2 * t + c1) * t + c0) / (((d3 * t + d2) * t + d1) * t + 1.0)
    return -v if x < 0.0 else v
def qlrps(fmhz, zsys, en0, ipol, eps, sgm, prop):
    """
    This routine converts the frequency fmhz, the surface refractivity reduced to
    sea level en0, and general system elevation zsys, and the polarization and ground
    constants eps, sgm, to wave number wn, surface refractivity ens, effective earth
    curvature gme, and surface impedance (zgndreal, zgndimag). It may be used with
    either the area prediction or the point-to-point mode.
    (Section 41)

    Results are written into prop in place; nothing is returned.
    """
    gma = 157e-9  # Actual earth curvature, 1/m.
    prop.wn = fmhz/47.7
    prop.ens = en0
    # Bug fix: Python 2-only '<>' replaced with '!=' (twice in this
    # function) so the module also runs on Python 3; behavior unchanged.
    if zsys != 0.0:
        # Reduce the refractivity from sea level to the system elevation.
        prop.ens *= math.exp(-zsys/9460.0)
    prop.gme = gma*(1.0 - 0.04665*math.exp(prop.ens/179.3))
    # NOTE(review): this read of the previous impedance is immediately
    # overwritten below; kept so prop must already carry both attributes.
    prop_zgnd = prop.zgndreal + prop.zgndimag*1j
    zq = eps + 376.62*sgm/prop.wn *1j
    prop_zgnd = (zq-1.0)**0.5
    if ipol != 0.0:
        # Non-zero polarization flag additionally divides by zq.
        prop_zgnd = prop_zgnd/zq
    prop.zgndreal = prop_zgnd.real
    prop.zgndimag = prop_zgnd.imag
def abq_alos(r):
    """Return the squared magnitude |r|**2 of the complex number r."""
    return (r * r.conjugate()).real
def alos(d, prop, propa):
    """
    The function alos finds the "line-of-sight" attenuation at the distance d. It
    uses a convex combination of plane earth fields and diffracted fields. A call
    with d = 0 sets up initial constants.
    (Section 17)
    """
    # To implement C++ static function variables.
    # Function must first be called with d = 0 to initialize.
    global wls
    prop_zgnd = prop.zgndreal + prop.zgndimag*1j
    if d == 0.0:
        # Initialization pass: combination weight for the two-ray vs
        # extended-diffraction mix.
        wls = 0.021/(0.021+prop.wn*prop.dh/max(10e3,propa.dlsa))
        alosv = 0.0
    else:
        # Terrain-roughness factor s and grazing-angle sine sps.
        q = (1.0-0.8*math.exp(-d/50.e3))*prop.dh
        s = 0.78*q*math.exp(-pow(q/16.0,0.25))
        q = prop.he[0] + prop.he[1]
        sps = q/(d*d + q*q)**0.5
        # Effective (rough-surface) reflection coefficient.
        r = (sps - prop_zgnd)/(sps + prop_zgnd)*math.exp(-min(10.0,prop.wn*s*sps))
        q = abq_alos(r)
        if q < 0.25 or q < sps:
            # Renormalize a too-small reflection coefficient.
            r = r*(sps/q)**0.5
        # Extended-diffraction baseline from the coefficients in propa.
        alosv = propa.emd*d + propa.aed
        # Two-ray path-difference phase, folded for large arguments.
        q = prop.wn*prop.he[0]*prop.he[1]*2.0/d
        if q > 1.57:
            q = 3.14-2.4649/q
        # Convex combination (weight wls) of the two-ray field and the
        # diffraction baseline.
        alosv = (-4.343*math.log(abq_alos((math.cos(q) - math.sin(q)*1j) + r)) - alosv) * wls \
            + alosv
    return alosv
def qlra(kst, klimx, mdvarx, prop, propv):
    """
    This is used to prepare the model in the area prediction mode. Normally,
    one first calls qlrps and then qlra. Before calling the latter, one should
    have defined in the <Primary Parameters 2> the antenna heights hg, the
    terrain irregularity dh, and (probably through qlrps) the variables wn,
    ens, gme, and zgnd. The input kst will define siting criteria for the
    terminals, klimx the climate, and mdvarx the mode of variability. If
    klimx <= 0 or mdvarx < 0 the associated parameters remain unchanged.
    (Section 42)

    Computes effective heights he, horizon distances dl and horizon angles
    the for both terminals, writing them into prop in place; returns 0.
    """
    # Read kept from the original: requires the impedance attributes to
    # exist, although the value itself is unused here.
    prop_zgnd = prop.zgndreal + prop.zgndimag * 1j
    for j in range(2):
        if kst[j] <= 0:
            # No siting advantage: effective height = structural height.
            prop.he[j] = prop.hg[j]
        else:
            q = 4.0
            # Bug fix: Python 2-only '<>' replaced with '!='.
            if kst[j] != 1:
                q = 9.0
            if prop.hg[j] < 5.0:
                # Bug fix: the original called the undefined name 'sin',
                # raising NameError at runtime; use math.sin.
                q *= math.sin(0.3141593*prop.hg[j])
            prop.he[j] = prop.hg[j] + (1.0 + q) \
                *math.exp(-min(20.0,2.0*prop.hg[j]/max(1e-3,prop.dh)))
        # Smooth-earth horizon distance, reduced for rough terrain.
        q = (2.0*prop.he[j]/prop.gme)**0.5
        prop.dl[j] = q*math.exp(-0.07*(prop.dh/max(prop.he[j],5.0))**0.5)
        prop.the[j] = (0.65*prop.dh*(q/prop.dl[j]-1.0)-2.0*prop.he[j])/q
    prop.mdp = 1
    propv.lvar = max(propv.lvar, 3)
    if mdvarx >= 0:
        propv.mdvar = mdvarx
        propv.lvar = max(propv.lvar, 4)
    if klimx > 0:
        propv.klim = klimx
        propv.lvar = 5
    return 0
def lrprop(d, prop, propa):  # // PaulM_lrprop
    """
    The Longley Rice propagation program. This is the basic program; it
    computes the reference attenuation and stores it in prop.aref.
    (Section 4)

    Call protocol: with prop.mdp != 0 the secondary parameters and the
    diffraction coefficients are (re)initialized; range/parameter sanity
    checks raise no exceptions but set the error indicator prop.kwx.
    The only code change from the original is '<>' -> '!=' (the former is
    Python 2-only syntax); behavior is otherwise identical.
    """
    global wlos, wscat
    global dmin, xae
    prop_zgnd = prop.zgndreal + prop.zgndimag * 1j
    if prop.mdp != 0:
        # --- Initialization: secondary parameters and diffraction fit ---
        for j in range(2):
            propa.dls[j] = (2.0*prop.he[j]/prop.gme)**0.5
        propa.dlsa = propa.dls[0] + propa.dls[1]
        propa.dla = prop.dl[0] + prop.dl[1]
        propa.tha = max(prop.the[0]+prop.the[1], -propa.dla*prop.gme)
        wlos = False
        wscat = False
        # Parameter range checks; violations only raise the kwx flag.
        if prop.wn < 0.838 or prop.wn > 210.0:
            prop.kwx = max(prop.kwx, 1)
        for j in range(2):
            if prop.hg[j] < 1.0 or prop.hg[j] > 1000.0:
                prop.kwx = max(prop.kwx, 1)
        for j in range(2):
            if (abs(prop.the[j]) > 200e-3
                    or prop.dl[j] < 0.1*propa.dls[j]
                    or prop.dl[j] > 3.0*propa.dls[j]):
                prop.kwx = max(prop.kwx, 3)
        if (prop.ens < 250.0 or prop.ens > 400.0
                or prop.gme < 75e-9 or prop.gme > 250e-9
                or prop_zgnd.real <= abs(prop_zgnd.imag)
                or prop.wn < 0.419 or prop.wn > 420.0):
            prop.kwx=4
        for j in range(2):
            if prop.hg[j] < 0.5 or prop.hg[j] > 3000.0:
                prop.kwx=4
        dmin = abs(prop.he[0] - prop.he[1])/200e-3
        # adiff(0, ...) sets up its static constants; then fit the linear
        # diffraction coefficients aed, emd at distances d3, d4.
        q = adiff(0.0, prop, propa)
        xae = pow(prop.wn*pow(prop.gme, 2), -(1.0/3.0))
        d3 = max(propa.dlsa, 1.3787*xae + propa.dla)
        d4 = d3 + 2.7574*xae
        a3 = adiff(d3, prop, propa)
        a4 = adiff(d4, prop, propa)
        propa.emd = (a4 - a3)/(d4 - d3)
        propa.aed = a3 - propa.emd*d3
    if prop.mdp >= 0:
        prop.mdp = 0
        prop.dist = d
    if prop.dist > 0.0:
        # Distance range checks (again only via kwx).
        if prop.dist > 1000e3:
            prop.kwx = max(prop.kwx,1)
        if prop.dist < dmin:
            prop.kwx = max(prop.kwx,3)
        if prop.dist < 1e3 or prop.dist > 2000e3:
            prop.kwx = 4
    if prop.dist < propa.dlsa:
        # --- Line-of-sight region ---
        if not wlos:
            # One-time fit of the line-of-sight coefficients ael, ak1, ak2.
            q = alos(0.0, prop, propa)
            d2 = propa.dlsa
            a2 = propa.aed + d2*propa.emd
            d0 = 1.908*prop.wn*prop.he[0]*prop.he[1]
            if propa.aed >= 0.0:
                d0 = min(d0, 0.5*propa.dla)
                d1 = d0 + 0.25*(propa.dla-d0)
            else:
                d1 = max(-propa.aed/propa.emd, 0.25*propa.dla)
            a1 = alos(d1, prop, propa)
            wq = False
            if d0 < d1:
                a0 = alos(d0, prop, propa)
                q = math.log(d2/d0)
                propa.ak2 = max(0.0, ((d2 - d0)*(a1 - a0)-(d1 - d0)*(a2 - a0)) \
                    / ((d2-d0)*math.log(d1/d0)-(d1-d0)*q))
                wq = (propa.aed >= 0.0 or propa.ak2 > 0.0)
                if wq:
                    propa.ak1 = (a2 - a0 - propa.ak2*q)/(d2 - d0)
                    if propa.ak1 < 0.0:
                        propa.ak1 = 0.0
                        propa.ak2 = fortran_dim(a2, a0)/q
                        if propa.ak2 == 0.0:
                            propa.ak1=propa.emd
            if not wq:
                propa.ak1 = fortran_dim(a2, a1)/(d2 - d1)
                propa.ak2 = 0.0
                if propa.ak1 == 0.0:
                    propa.ak1=propa.emd
            propa.ael = a2 - propa.ak1*d2 - propa.ak2*math.log(d2)
            wlos = True
        if prop.dist > 0.0:
            prop.aref = propa.ael + propa.ak1*prop.dist \
                + propa.ak2*math.log(prop.dist)
    if prop.dist <= 0.0 or prop.dist >= propa.dlsa:
        # --- Beyond-the-horizon region (diffraction / scatter) ---
        if not wscat:
            # One-time fit of the scatter coefficients; note the larger
            # distance d6 must be evaluated before d5 (see ascat).
            q = ascat(0.0, prop, propa)
            d5 = propa.dla + 200e3
            d6 = d5+200e3
            a6 = ascat(d6, prop, propa)
            a5 = ascat(d5, prop, propa)
            if a5 < 1000.0:
                propa.ems = (a6 - a5)/200e3
                propa.dx = max(propa.dlsa, max(propa.dla+0.3*xae \
                    *math.log(47.7*prop.wn), (a5-propa.aed-propa.ems*d5) \
                    /(propa.emd-propa.ems)))
                propa.aes=(propa.emd-propa.ems)*propa.dx+propa.aed
            else:
                # ascat flagged an inadmissible geometry (sentinel 1001):
                # fall back to pure diffraction.
                propa.ems = propa.emd
                propa.aes = propa.aed
                propa.dx = 10.e6
            wscat = True
        if prop.dist > propa.dx:
            prop.aref = propa.aes + propa.ems*prop.dist
        else:
            prop.aref = propa.aed + propa.emd*prop.dist
    # Reference attenuation is never negative.
    prop.aref = max(prop.aref, 0.0)
    return 0
def curve(c1, c2, x1, x2, x3, de):
    """
    Evaluate the variability fitting curve used by avar: a saturating
    quadratic in de/x1, scaled by a resonance-shaped term in (de - x2)/x3.
    (Section 30)
    """
    ratio_sq = (de/x1)**2.0
    peak = c1 + c2/(1.0 + ((de - x2)/x3)**2.0)
    return peak*ratio_sq/(1.0 + ratio_sq)
def avar(zzt, zzl, zzc, prop, propv):
    """
    When in the area prediction mode, one needs a threefold quantile of
    attenuation which corresponds to the fraction q_T of time, the fraction
    q_L of locations, and the fraction q_S of "situations." In the point to
    point mode, one needs only q_T and q_S. For efficiency, avar is written as
    a function of the "standard normal deviates" z_T, z_L, and z_S corresponding
    to the requested fractions. Thus, for example, q_T = Q(z_T) where Q(z) is
    the "complementary standard normal distribution." For the point to point
    mode one sets z_L = 0 which corresponds to the median q_L = 0.50.

    The subprogram is written trying to reduce duplicate calculations. This is
    done through the switch lvar. On first entering, set lvar = 5. Then all
    parameters will be initialized, and lvar will be changed to 0. If the
    program is to be used to find several quantiles with different values of
    z_T, z_L, or z_S, then lvar should be 0, as it is. If the distance is
    changed, set lvar = 1 and parameters that depend on the distance will be
    recomputed. If antenna heights are changed, set lvar = 2; if the frequency,
    lvar = 3; if the mode of variability mdvar, set lvar = 4; and finally, if
    the climate is changed, set lvar = 5. The higher the value of lvar, the
    more parameters will be recomputed.
    (Section 28)
    """
    global kdv
    global dexa, de, vmd, vs0, sgl, sgtm, sgtp, sgtd, tgtd
    global gm, gp, cv1, cv2, yv1, yv2, yv3, csm1, csm2, ysm1
    global ysm2, ysm3, csp1, csp2, ysp1, ysp2, ysp3, csd1, zd
    global cfm1, cfm2, cfm3, cfp1, cfp2, cfp3
    global ws, w1
    # Climate-indexed coefficient tables (7 climates) feeding curve().
    bv1 = [-9.67,-0.62,1.26,-9.21,-0.62,-0.39,3.15]
    bv2 = [12.7,9.19,15.5,9.05,9.19,2.86,857.9]
    xv1 = [144.9e3,228.9e3,262.6e3,84.1e3,228.9e3,141.7e3,2222.e3]
    xv2 = [190.3e3,205.2e3,185.2e3,101.1e3,205.2e3,315.9e3,164.8e3]
    xv3 = [133.8e3,143.6e3,99.8e3,98.6e3,143.6e3,167.4e3,116.3e3]
    bsm1 = [2.13,2.66,6.11,1.98,2.68,6.86,8.51]
    bsm2 = [159.5,7.67,6.65,13.11,7.16,10.38,169.8]
    xsm1 = [762.2e3,100.4e3,138.2e3,139.1e3,93.7e3,187.8e3,609.8e3]
    xsm2 = [123.6e3,172.5e3,242.2e3,132.7e3,186.8e3,169.6e3,119.9e3]
    xsm3 = [94.5e3,136.4e3,178.6e3,193.5e3,133.5e3,108.9e3,106.6e3]
    bsp1 = [2.11,6.87,10.08,3.68,4.75,8.58,8.43]
    bsp2 = [102.3,15.53,9.60,159.3,8.12,13.97,8.19]
    xsp1 = [636.9e3,138.7e3,165.3e3,464.4e3,93.2e3,216.0e3,136.2e3]
    xsp2 = [134.8e3,143.7e3,225.7e3,93.1e3,135.9e3,152.0e3,188.5e3]
    xsp3 = [95.6e3,98.6e3,129.7e3,94.2e3,113.4e3,122.7e3,122.9e3]
    bsd1 = [1.224,0.801,1.380,1.000,1.224,1.518,1.518]
    bzd1 = [1.282,2.161,1.282,20.,1.282,1.282,1.282]
    bfm1 = [1.0,1.0,1.0,1.0,0.92,1.0,1.0]
    bfm2 = [0.0,0.0,0.0,0.0,0.25,0.0,0.0]
    bfm3 = [0.0,0.0,0.0,0.0,1.77,0.0,0.0]
    bfp1 = [1.0,0.93,1.0,0.93,0.93,1.0,1.0]
    bfp2 = [0.0,0.31,0.0,0.19,0.31,0.0,0.0]
    bfp3 = [0.0,2.00,0.0,1.79,2.00,0.0,0.0]
    rt = 7.8
    rl = 24.0
    temp_klim = propv.klim - 1
    if propv.lvar > 0:
        # Cascaded recompute: each step runs when lvar selects it, and
        # lvar == 5 (i.e. not in [1..4]) runs everything.
        if propv.lvar not in [1, 2, 3, 4]:
            # lvar == 5: climate changed -- reload all climate constants.
            if propv.klim <= 0 or propv.klim > 7:
                # Invalid climate: default to 5 and flag the error.
                propv.klim = 5
                temp_klim = 4
                prop.kwx = max(prop.kwx,2)
            cv1 = bv1[temp_klim]
            cv2 = bv2[temp_klim]
            yv1 = xv1[temp_klim]
            yv2 = xv2[temp_klim]
            yv3 = xv3[temp_klim]
            csm1 = bsm1[temp_klim]
            csm2 = bsm2[temp_klim]
            ysm1 = xsm1[temp_klim]
            ysm2 = xsm2[temp_klim]
            ysm3 = xsm3[temp_klim]
            csp1 = bsp1[temp_klim]
            csp2 = bsp2[temp_klim]
            ysp1 = xsp1[temp_klim]
            ysp2 = xsp2[temp_klim]
            ysp3 = xsp3[temp_klim]
            csd1 = bsd1[temp_klim]
            zd = bzd1[temp_klim]
            cfm1 = bfm1[temp_klim]
            cfm2 = bfm2[temp_klim]
            cfm3 = bfm3[temp_klim]
            cfp1 = bfp1[temp_klim]
            cfp2 = bfp2[temp_klim]
            cfp3 = bfp3[temp_klim]
        if propv.lvar == 4 or propv.lvar not in [1, 2, 3, 4]:
            # lvar >= 4: decode the mode of variability into kdv plus the
            # ws / w1 flags (encoded as mdvar + 20 and/or + 10).
            kdv = propv.mdvar
            ws = (kdv >= 20)
            if ws:
                kdv -= 20
            w1 = (kdv >= 10)
            if w1:
                kdv -= 10
            if kdv < 0 or kdv > 3:
                kdv = 0
                prop.kwx = max(prop.kwx,2)
        if propv.lvar in [3, 4] or propv.lvar not in [1, 2, 3, 4]:
            # lvar >= 3: frequency-dependent gain factors.
            q = math.log(0.133*prop.wn)
            gm = cfm1 + cfm2/(pow(cfm3*q, 2.0) + 1.0)
            gp = cfp1 + cfp2/(pow(cfp3*q, 2.0) + 1.0)
        if propv.lvar in [2, 3, 4] or propv.lvar not in [1, 2, 3, 4]:
            # lvar >= 2: effective-distance scale from antenna heights.
            dexa = (18.e6*prop.he[0])**0.5 + (18.e6*prop.he[1])**0.5 \
                + pow((575.7e12/prop.wn), (1./3.))
        if propv.lvar in [1, 2, 3, 4] or propv.lvar not in [1, 2, 3, 4]:
            # lvar >= 1: distance-dependent medians and deviations.
            if prop.dist < dexa:
                de = 130.e3*prop.dist/dexa
            else:
                de = 130.e3+prop.dist-dexa
            vmd = curve(cv1, cv2, yv1, yv2, yv3, de)
            sgtm = curve(csm1,csm2,ysm1,ysm2,ysm3,de) * gm
            sgtp = curve(csp1,csp2,ysp1,ysp2,ysp3,de) * gp
            sgtd = sgtp*csd1
            tgtd = (sgtp - sgtd)*zd
            if w1:
                sgl = 0.0
            else:
                q = (1.0 - 0.8*math.exp(-prop.dist/50.e3))*prop.dh*prop.wn
                sgl = 10.0*q/(q + 13.0)
            if ws:
                vs0 = 0.0
            else:
                vs0 = pow(5.0 + 3.0*math.exp(-de/100.e3), 2.0)
        # All requested recomputation done.
        propv.lvar=0
    zt = zzt
    zl = zzl
    zc = zzc
    # kdv collapses some deviates onto others (modes of variability).
    if kdv == 0:
        zt = zc
        zl = zc
    elif kdv == 1:
        zl = zc
    elif kdv == 2:
        zl = zt
    if abs(zt) > 3.1 or abs(zl) > 3.1 or abs(zc) > 3.1:
        prop.kwx = max(prop.kwx, 1)
    # Time deviation: different slopes below/above the median, with a
    # 1/zt tail beyond zd.
    if zt < 0.0:
        sgt = sgtm
    elif zt <= zd:
        sgt = sgtp
    else:
        sgt = sgtd + tgtd/zt
    vs = vs0 + pow(sgt*zt,2.0)/(rt + zc*zc) + pow(sgl*zl, 2.0)/(rl + zc*zc)
    # Combine the deviations according to the mode kdv.
    if kdv == 0:
        yr = 0.0
        propv.sgc = (sgt*sgt + sgl*sgl + vs)**0.5
    elif kdv == 1:
        yr = sgt*zt
        propv.sgc = (sgl*sgl + vs)**0.5
    elif kdv == 2:
        yr = zt * (sgt*sgt + sgl*sgl)**0.5
        propv.sgc = vs**0.5
    else:
        yr = sgt*zt + sgl*zl
        propv.sgc = vs**0.5
    avarv = prop.aref - vmd - yr - propv.sgc*zc
    if avarv < 0.0:
        # Soft clamp toward zero for negative attenuation.
        avarv = avarv*(29.0 - avarv)/(29.0 - 10.0*avarv)
    return avarv
def hzns(pfl, prop):
    """
    Here we use the terrain profile pfl to find the two horizons. Output consists
    of the horizon distances dl and the horizon take-off angles the. If the path is
    line-of-sight, the routine sets both horizon distances equal to dist.

    pfl layout: pfl[0] = number of intervals, pfl[1] = interval length,
    pfl[2:] = elevations.
    (Section 47)
    """
    np = int(pfl[0])
    xi = pfl[1]
    # Terminal elevations: terrain plus antenna structural heights.
    za = pfl[2] + prop.hg[0]
    zb = pfl[np+2] + prop.hg[1]
    qc = 0.5*prop.gme
    q = qc*prop.dist
    # Start from the line-of-sight assumption: angles toward the other
    # terminal (earth-curvature corrected), horizons at full distance.
    prop.the[1] = (zb-za)/prop.dist
    prop.the[0] = prop.the[1] - q
    prop.the[1] = -prop.the[1] - q
    prop.dl[0] = prop.dist
    prop.dl[1] = prop.dist
    if np >= 2:
        sa = 0.0
        sb = prop.dist
        # wq stays True until the first terminal's horizon is obstructed;
        # only then is the second terminal's horizon searched.
        wq = True
        for i in range(1, np):
            sa += xi
            sb -= xi
            # Obstruction height above the current ray from terminal 1.
            q = pfl[i+2] - (qc*sa + prop.the[0])*sa - za
            if q > 0.0:
                prop.the[0] += q/sa
                prop.dl[0] = sa
                wq = False
            if not wq:
                # Same test from terminal 2, scanning backwards via sb.
                q = pfl[i+2] - (qc*sb + prop.the[1])*sb - zb
                if q > 0.0:
                    prop.the[1] += q/sb
                    prop.dl[1] = sb
def z1sq1 (z, x1, x2, z0, zn):
    """
    A linear least squares fit between x1, x2 to the function described by the
    array z. This array must have a special format: z(1) = en, the number of
    equally large intervals, z(2) = epsilon, the interval length, and z(j+3),
    j = 0, ..., n, function values. The output consists of values of the required
    line, z0 at 0, zn at xt = n*epsilon.
    (Section 53)

    [Note: Changed to a function that returns z0 and zn, since Python functions
    cannot return modified parameters that are immutable objects. Because of this
    change, the code has been changed elsewhere, wherever z1sq1 is called. -- AWC]
    (The incoming z0, zn arguments are therefore ignored.)
    """
    xn = z[0]
    # Clamp the fit window [xa, xb] to the profile, in interval units.
    xa = int(fortran_dim(x1/z[1], 0.0))
    xb = xn - int(fortran_dim(xn, x2/z[1]))
    if xb <= xa:
        # Degenerate window: widen by one interval on each side.
        xa = fortran_dim(xa, 1.0)
        xb = xn - fortran_dim(xn, xb+1.0)
    ja = int(xa)
    jb = int(xb)
    n = jb - ja
    xa = xb - xa
    # x runs symmetrically around the window center.
    x = -0.5*xa
    xb += x
    # a accumulates the mean, b the first moment (endpoints half-weighted).
    a = 0.5*(z[ja+2] + z[jb+2])
    b = 0.5*(z[ja+2] - z[jb+2])*x
    for i in range(2, n+1):
        ja = ja + 1
        x += 1.0
        a += z[ja + 2]
        b += z[ja + 2]*x
    a /= xa
    b = b*12.0/((xa*xa + 2.0)*xa)
    # Evaluate the fitted line at 0 and at xn intervals.
    z0 = a - b*xb
    zn = a + b*(xn-xb)
    return z0, zn
def qtile(nn, a, ir):
    """
    This routine provides a quantile. It reorders the array a so that a(j),
    j = 1...i_r are all greater than or equal to all a(i), i = i_r ... nn. In
    particular, a(i_r) will have the same value it would have if a were completely
    sorted in descending order. The returned value is qtile = a(i_r).

    This is a quickselect; a is partially reordered IN PLACE.  The
    done/goto10 flags emulate the GOTO flow of the FORTRAN original.
    (Section 52)
    """
    m = 0
    n = nn
    j1 = n
    i0 = m
    done = False
    goto10 = True
    m = 0
    n = nn
    # Clamp the requested rank to the valid index range.
    k = min(max(0, ir), n)
    q = a[k]
    while not done:
        if goto10:
            # (Re)load the pivot and reset the scan bounds.
            q = a[k]
            i0 = m
            j1 = n
        # Scan forward for an element below the pivot ...
        i = i0
        while i <= n and a[i] >= q:
            i += 1
        if i > n:
            i = n
        # ... and backward for an element above it.
        j = j1
        while j >= m and a[j] <= q:
            j -= 1
        if j < m:
            j = m
        if i < j:
            # Out-of-order pair: swap and continue the same partition pass.
            r = a[i]
            a[i] = a[j]
            a[j] = r
            i0 = i+1
            j1 = j-1
            goto10 = False
        elif i < k:
            # Pivot belongs further right: recurse into the right part.
            a[k] = a[i]
            a[i] = q
            m = i+1
            goto10 = True
        elif j > k:
            # Pivot belongs further left: recurse into the left part.
            a[k] = a[j]
            a[j] = q
            n = j-1
            goto10 = True
        else:
            # Pivot is at rank k: done.
            done = True
    return q
def qerf(z):
    """
    The standard normal complementary probability Q(z) = 1/sqrt(2*pi) *
    integral from z to infinity of exp(-t^2/2) dt, computed with the
    Hastings rational approximation (C. Hastings, Jr., "Approximations for
    digital computers", Princeton University Press, 1955); the maximum
    error should be 7.5E-8.
    (Section 50)
    """
    # Polynomial coefficients, highest order first (Horner evaluation below
    # reproduces the original nested product exactly).
    coeffs = (1.330274429, -1.821255987, 1.781477937,
              -0.356563782, 0.319381530)
    rp = 4.317008
    rrt2pi = 0.398942280
    t = abs(z)
    if t >= 10.0:
        # Far tail: the approximation underflows to zero anyway.
        result = 0.0
    else:
        t = rp/(t + rp)
        poly = 0.0
        for c in coeffs:
            poly = poly*t + c
        result = math.exp(-0.5*z*z)*rrt2pi*poly*t
    # Use symmetry Q(-z) = 1 - Q(z) for negative arguments.
    if z < 0.0:
        result = 1.0 - result
    return result
def d1thx(pfl, x1, x2):
    """
    Using the terrain profile pfl we find deltah, the interdecile range of
    elevations between the two points x1 and x2.
    (Section 48)
    """
    # pfl[0] = number of intervals, pfl[1] = interval length (meters),
    # pfl[2:] = elevations; xa/xb are x1/x2 in interval units.
    np = int(pfl[0])
    xa = x1/pfl[1]
    xb = x2/pfl[1]
    d1thxv = 0.0
    if xb - xa < 2.0:  # exit out
        return d1thxv
    # ka/kb select the 10th/90th percentile sample indices of n resamples.
    ka = int(0.1*(xb - xa + 8.0))
    ka = min(max(4, ka), 25)
    n = 10*ka - 5
    kb = n-ka + 1
    sn = n-1
    # s mimics the special z-array format expected by z1sq1:
    # s[0] = interval count, s[1] = interval length, s[2:] = values.
    s = []
    s.append(sn)
    s.append(1.0)
    xb = (xb - xa)/sn
    k = int(xa + 1.0)
    xa -= float(k)
    # Resample the profile at n equally spaced points by linear interpolation.
    for j in range(n):
        while xa > 0.0 and k < np:
            xa -= 1.0
            k += 1
        s.append(pfl[k+2] + (pfl[k+2] - pfl[k+1])*xa)
        xa = xa + xb
    # Remove the least-squares linear trend from the resampled profile.
    xa, xb = z1sq1(s,0.0,sn,xa,xb) # Revised call to z1sq1
    xb = (xb - xa)/sn
    for j in range(n):
        s[j+2] -= xa
        xa = xa + xb
    # Interdecile range via the qtile partial-selection routine.
    spartial = s[2:]
    d1thxv = qtile(n-1, spartial, ka-1) - qtile(n-1, spartial, kb-1)
    # Empirical correction factor for short paths (x2 - x1 in meters).
    d1thxv /= 1.0 - 0.8*math.exp(-(x2 - x1)/50.0e3)
    return d1thxv
def qlrpfl(pfl, klimx, mdvarx, prop, propa, propv):
    """
    This subroutine may be used to prepare for the point-to-point mode. Since the
    path is fixed, it has only one value of aref and therefore at the end of the
    routine there is a call to lrprop. To complete the process one needs to call avar
    for whatever quantiles are desired.
    (Section 43)

    Mutates prop/propa/propv in place; returns 0.
    """
    xl = []
    # Total path distance = interval count * interval length.
    prop.dist = pfl[0] * pfl[1]
    np = int(pfl[0])
    # Find the radio horizons (fills prop.dl / prop.the).
    hzns(pfl, prop)
    for j in range(2):
        xl.append(min(15.0*prop.hg[j], 0.1*prop.dl[j]))
    xl[1] = prop.dist - xl[1]
    # Terrain irregularity parameter over the interior of the path.
    prop.dh = d1thx(pfl, xl[0], xl[1])
    if prop.dl[0] + prop.dl[1] > 1.5*prop.dist:
        # Line-of-sight-like path: redo the fit over the whole path and
        # derive effective heights/horizons from the smooth-earth formulas.
        za = 0 # Must initialize before calling z1sq1
        zb = 0 # Must initialize before calling z1sq1
        za, zb = z1sq1(pfl, xl[0], xl[1], za, zb) # Revised call to z1sq1
        prop.he[0] = prop.hg[0] + fortran_dim(pfl[2], za)
        prop.he[1] = prop.hg[1] + fortran_dim(pfl[np+2], zb)
        for j in range(2):
            prop.dl[j] = (2.0*prop.he[j]/prop.gme)**0.5 \
                * math.exp(-0.07*(prop.dh/max(prop.he[j],5.0))**0.5)
        q = prop.dl[0] + prop.dl[1]
        if q <= prop.dist:
            # Horizons sum short of the path: scale effective heights up.
            q = pow(prop.dist/q, 2.0)
            for j in range(2):
                prop.he[j] *= q
                prop.dl[j] = (2.0*prop.he[j]/prop.gme)**0.5 \
                    * math.exp(-0.07*(prop.dh/max(prop.he[j],5.0))**0.5)
        for j in range(2):
            q = (2.0*prop.he[j]/prop.gme)**0.5
            prop.the[j] = (0.65*prop.dh*(q/prop.dl[j]-1.0)-2.0 \
                * prop.he[j])/q
    else:
        # Transhorizon path: fit each end separately out to 0.9 * horizon.
        za = 0 # Must initialize before using in function call
        q = 0 # Must initialize before using in function call
        za, q = z1sq1(pfl, xl[0], 0.9*prop.dl[0], za, q) # Revised call to z1sq1
        zb = 0 # Must initialize before using in function call
        q, zb = z1sq1(pfl, prop.dist-0.9*prop.dl[1], xl[1], q, zb) # Revised call
        prop.he[0] = prop.hg[0] + fortran_dim(pfl[2], za)
        prop.he[1] = prop.hg[1] + fortran_dim(pfl[np+2], zb)
    # Point-to-point mode; bump lvar so climate/variability get recomputed.
    prop.mdp = -1
    propv.lvar = max(propv.lvar, 3)
    if mdvarx >= 0:
        propv.mdvar = mdvarx
        propv.lvar = max(propv.lvar, 4)
    if klimx > 0:
        propv.klim = klimx
        propv.lvar = 5
    lrprop(0.0, prop, propa)
    return 0
def deg2rad(d):
    """Convert the angle *d* from degrees to radians.

    Thin legacy wrapper retained for API compatibility; it simply
    delegates to the standard library.
    """
    return math.radians(d)
#//********************************************************
#//* Point-To-Point Mode Calculations *
#//********************************************************
def point_to_point(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                   eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                   dbloss, strmode, errnum):
    """Point-to-point mode prediction; returns (dbloss, strmode, errnum).

    The dbloss/strmode/errnum parameters are C-style out-parameters kept
    for signature compatibility; their input values are ignored.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ## 4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ## 7-Maritime Temperate, Over Sea
    ## conf, rel: .01 to .99
    ## elev[]: [num points - 1], [delta dist(meters)], [height(meters) point 1], ..., [height(meters) point n]
    ## errnum: 0- No Error.
    ## 1- Warning: Some parameters are nearly out of range.
    ##    Results should be used with caution.
    ## 2- Note: Default parameters have been substituted for impossible ones.
    ## 3- Warning: A combination of parameters is out of range.
    ##    Results are probably invalid.
    ## Other- Warning: Some parameters are out of range.
    ##    Results are probably invalid.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    zsys = 0
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = radio_climate
    prop.kwx = 0
    propv.lvar = 5
    prop.mdp = -1
    # Standard normal deviates for the requested confidence/reliability.
    zc = qerfi(conf)
    zr = qerfi(rel)
    np = int(elev[0])
    eno = eno_ns_surfref
    enso = 0.0
    q = enso
    if q <= 0.0:
        # No measured surface refractivity: estimate the system elevation
        # zsys from the central portion of the terrain profile.
        ja = int(3.0 + 0.1 * elev[0])
        jb = np - ja + 6
        for i in range(ja-1, jb):
            zsys += elev[i]
        zsys /= (jb - ja + 1)
        q = eno
    propv.mdvar = 13 # WinnForum mod. ORIGINAL CODE HAS mdvar = 12 ***
    qlrps(frq_mhz, zsys, q, pol, eps_dielect, sgm_conductivity,prop)
    qlrpfl(elev, propv.klim, propv.mdvar, prop, propa, propv)
    # Free-space loss in dB (frequency in MHz, distance in km).
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    # Classify the propagation mode from distance vs. horizon distances.
    q = prop.dist - propa.dla
    if int(q) < 0.0:
        strmode = "Line-Of-Sight Mode"
    else:
        if int(q) == 0.0:
            strmode = "Single Horizon"
        elif int(q) > 0.0:
            strmode = "Double Horizon"
        if prop.dist <= propa.dlsa or prop.dist <= propa.dx:
            strmode += ", Diffraction Dominant"
        elif prop.dist > propa.dx:
            strmode += ", Troposcatter Dominant"
    dbloss = avar(zr, 0.0, zc, prop, propv) + fs
    errnum = prop.kwx
    return dbloss, strmode, errnum
def point_to_pointMDH(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                      eno_ns_surfref, frq_mhz, radio_climate, pol, timepct,
                      locpct, confpct, dbloss, propmode, deltaH, errnum):
    """Point-to-point mode with separate time/location/confidence quantiles.

    Returns (dbloss, propmode, deltaH, errnum); the last four parameters
    are C-style out-parameters whose input values are ignored.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ## 4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ## 7-Maritime Temperate, Over Sea
    ## timepct, locpct, confpct: .01 to .99
    ## elev[]: [num points - 1], [delta dist(meters)], [height(meters) point 1], ..., [height(meters) point n]
    ## propmode: Value Mode
    ## -1 mode is undefined
    ## 0 Line of Sight
    ## 5 Single Horizon, Diffraction
    ## 6 Single Horizon, Troposcatter
    ## 9 Double Horizon, Diffraction
    ## 10 Double Horizon, Troposcatter
    ## errnum: 0- No Error.
    ## 1- Warning: Some parameters are nearly out of range.
    ##    Results should be used with caution.
    ## 2- Note: Default parameters have been substituted for impossible ones.
    ## 3- Warning: A combination of parameters is out of range.
    ##    Results are probably invalid.
    ## Other- Warning: Some parameters are out of range.
    ##    Results are probably invalid.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    zsys = 0
    propmode = -1 # mode is undefined
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = radio_climate
    prop.kwx = 0
    propv.lvar = 5
    prop.mdp = -1
    # Standard normal deviates for the requested quantiles.
    ztime = qerfi(timepct)
    zloc = qerfi(locpct)
    zconf = qerfi(confpct)
    np = int(elev[0])
    eno = eno_ns_surfref
    enso = 0.0
    q = enso
    if q <= 0.0:
        # No measured surface refractivity: estimate zsys from the middle
        # of the terrain profile.
        ja = int(3.0 + 0.1 * elev[0])
        jb = np - ja + 6
        for i in range(ja-1, jb):
            zsys += elev[i]
        zsys /= (jb - ja + 1)
        q = eno
    propv.mdvar = 12
    qlrps(frq_mhz, zsys, q, pol, eps_dielect, sgm_conductivity, prop)
    qlrpfl(elev, propv.klim, propv.mdvar, prop,propa, propv)
    # Free-space loss in dB (frequency in MHz, distance in km).
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    deltaH = prop.dh
    # Encode the propagation mode as a small integer (see table above).
    q = prop.dist - propa.dla
    if int(q) < 0.0:
        propmode = 0 # Line-Of-Sight Mode
    else:
        if int(q) == 0.0:
            propmode = 4 # Single Horizon
        elif int(q) > 0.0:
            propmode = 8 # Double Horizon
        if prop.dist <= propa.dlsa or prop.dist <= propa.dx:
            propmode += 1 # Diffraction Dominant
        elif prop.dist > propa.dx:
            propmode += 2 # Troposcatter Dominant
    dbloss = avar(ztime, zloc, zconf, prop, propv) + fs # avar(time,location,confidence)
    errnum = prop.kwx
    return dbloss, propmode, deltaH, errnum
def point_to_pointDH (elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                      eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                      dbloss, deltaH, errnum):
    """Point-to-point mode that also reports deltaH (terrain irregularity).

    Returns (dbloss, deltaH, errnum, strmode); the dbloss/deltaH/errnum
    parameters are C-style out-parameters whose input values are ignored.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ## 4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ## 7-Maritime Temperate, Over Sea
    ## conf, rel: .01 to .99
    ## elev[]: [num points - 1], [delta dist(meters)], [height(meters) point 1], ..., [height(meters) point n]
    ## errnum: 0- No Error.
    ## 1- Warning: Some parameters are nearly out of range.
    ##    Results should be used with caution.
    ## 2- Note: Default parameters have been substituted for impossible ones.
    ## 3- Warning: A combination of parameters is out of range.
    ##    Results are probably invalid.
    ## Other- Warning: Some parameters are out of range.
    ##    Results are probably invalid.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    zsys = 0
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = radio_climate
    prop.kwx = 0
    propv.lvar = 5
    prop.mdp = -1
    # Standard normal deviates for confidence/reliability.
    zc = qerfi(conf)
    zr = qerfi(rel)
    np = int(elev[0])
    eno = eno_ns_surfref
    enso = 0.0
    q = enso
    if q <= 0.0:
        # No measured surface refractivity: estimate zsys from the middle
        # of the terrain profile.
        ja = int(3.0 + 0.1 * elev[0])
        jb = np - ja + 6
        for i in range(ja-1, jb): # (i=ja-1;i<jb;++i)
            zsys += elev[i]
        zsys /= (jb - ja + 1)
        q = eno
    propv.mdvar = 12
    qlrps(frq_mhz, zsys, q, pol, eps_dielect, sgm_conductivity, prop)
    qlrpfl(elev, propv.klim, propv.mdvar, prop,propa, propv)
    # Free-space loss in dB (frequency in MHz, distance in km).
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    deltaH = prop.dh
    # Classify the propagation mode from distance vs. horizon distances.
    q = prop.dist - propa.dla
    if int(q) < 0.0:
        strmode = "Line-Of-Sight Mode"
    else:
        if int(q) == 0.0:
            strmode = "Single Horizon"
        elif int(q) > 0.0:
            strmode = "Double Horizon"
        if prop.dist <= propa.dlsa or prop.dist <= propa.dx:
            strmode += ", Diffraction Dominant"
        elif prop.dist > propa.dx:
            strmode += ", Troposcatter Dominant"
    dbloss = avar(zr, 0.0, zc, prop, propv) + fs # avar(time,location,confidence)
    errnum = prop.kwx
    return dbloss, deltaH, errnum, strmode # Original routine never returns strmode
##//********************************************************
##//* Area Mode Calculations *
##//********************************************************
def area(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria, RSiteCriteria,
         eps_dielect, sgm_conductivity, eno_ns_surfref, frq_mhz, radio_climate,
         pol, pctTime, pctLoc, pctConf, dbloss, strmode, errnum):
    """Area-mode Longley-Rice prediction; returns (dbloss, errnum).

    The dbloss/strmode/errnum parameters are C-style out-parameters kept
    for signature compatibility; their input values are ignored.
    """
    ## pol: 0-Horizontal, 1-Vertical
    ## TSiteCriteria, RSiteCriteria:
    ## 0 - random, 1 - careful, 2 - very careful
    ## radio_climate: 1-Equatorial, 2-Continental Subtropical, 3-Maritime Tropical,
    ## 4-Desert, 5-Continental Temperate, 6-Maritime Temperate, Over Land,
    ## 7-Maritime Temperate, Over Sea
    ## ModVar: 0 - Single: pctConf is "Time/Situation/Location", pctTime, pctLoc not used
    ## 1 - Individual: pctTime is "Situation/Location", pctConf is "Confidence", pctLoc not used
    ## 2 - Mobile: pctTime is "Time/Locations (Reliability)", pctConf is "Confidence", pctLoc not used
    ## 3 - Broadcast: pctTime is "Time", pctLoc is "Location", pctConf is "Confidence"
    ## pctTime, pctLoc, pctConf: .01 to .99
    ## errnum: 0- No Error.
    ## 1- Warning: Some parameters are nearly out of range.
    ##    Results should be used with caution.
    ## 2- Note: Default parameters have been substituted for impossible ones.
    ## 3- Warning: A combination of parameters is out of range.
    ##    Results are probably invalid.
    ## Other- Warning: Some parameters are out of range.
    ##    Results are probably invalid.
    ## NOTE: strmode is not used at this time.
    prop = PropType()
    propv = PropvType()
    propa = PropaType()
    kst = [int(TSiteCriteria), int(RSiteCriteria)]
    # Standard normal deviates for the requested quantiles.
    zt = qerfi(pctTime)
    zl = qerfi(pctLoc)
    zc = qerfi(pctConf)
    eps = eps_dielect
    sgm = sgm_conductivity
    eno = eno_ns_surfref
    prop.dh = deltaH
    prop.hg[0] = tht_m
    prop.hg[1] = rht_m
    propv.klim = int(radio_climate)
    prop.ens = eno
    prop.kwx = 0
    ivar = int(ModVar)
    ipol = int(pol)
    qlrps(frq_mhz, 0.0, eno, ipol, eps, sgm, prop)
    qlra(kst, propv.klim, ivar, prop, propv)
    if propv.lvar < 1:
        propv.lvar = 1
    lrprop(dist_km * 1000.0, prop, propa)
    # Free-space loss in dB (frequency in MHz, distance in km).
    fs = 32.45 + 20.0 * math.log10(frq_mhz) + 20.0 * math.log10(prop.dist / 1000.0)
    xlb = fs + avar(zt, zl, zc, prop, propv)
    dbloss = xlb
    # prop.kwx is 0 on success, so the original if/else (errnum = 0 when
    # kwx == 0, else errnum = kwx) collapses to a single assignment.
    errnum = prop.kwx
    return dbloss, errnum
def ITMAreadBLoss(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria,
                  RSiteCriteria, eps_dielect, sgm_conductivity, eno_ns_surfref,
                  frq_mhz, radio_climate, pol, pctTime, pctLoc, pctConf):
    """Convenience wrapper around area(); returns (dbloss, errnum).

    area() treats its last three parameters as C-style out-parameters and
    ignores their input values, so placeholders are passed directly.
    """
    dbloss, errnum = area(ModVar, deltaH, tht_m, rht_m, dist_km,
                          TSiteCriteria, RSiteCriteria, eps_dielect,
                          sgm_conductivity, eno_ns_surfref, frq_mhz,
                          radio_climate, pol, pctTime, pctLoc, pctConf,
                          0., '', 0)
    return dbloss, errnum
def ITMDLLVersion():
    """Return the version number of this ITM implementation."""
    return 7.0
# Test code:
# Toggle these flags to run the corresponding self-test blocks below
# when the module is executed/imported.
p2pTest = False
p2pMDHtest = False
p2pDHtest = False
areaTest = False
def setElevation():
    """
    Returns an elevation profile for testing point-to-point prop mode.
    Format: [num points - 1, delta dist (meters), height 1, ..., height n].
    Andrew Clegg
    November 2016
    """
    # This is the GLOBE terrain profile from MSAM, from 39N 77W to 39N 77.5W
    ## return [95, 454.7352316, 89., 92., 89., 92., 100., 104., 106., 108., 106.,
    ## 100., 88., 80., 75., 78., 80., 80., 86., 91., 98., 105., 110., 107.,
    ## 103., 97., 91., 89., 92., 87., 81., 79., 77., 75., 80., 85., 89., 98.,
    ## 105., 107., 107., 106., 102., 105., 112., 108., 99., 84., 61., 51.,
    ## 74., 86., 93., 97., 100., 102., 109., 114., 116., 117., 117., 112.,
    ## 113., 117., 122., 129., 138., 131., 119., 103., 93., 87., 83., 86., 97.,
    ## 99., 103., 111., 108., 101., 97., 95., 95., 94., 90., 85., 81., 78.,
    ## 78., 78., 78., 79., 83., 89., 89., 91., 96., 101.]
    # This is the Crystal Palace to Mursley, England path from the ITM test code
    return [156, 499, 96, 84, 65, 46, 46, 46, 61, 41, 33, 27, 23,
            19, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
            17, 19, 21, 23, 25, 27, 29, 35, 46, 41, 35, 30, 33,
            35, 37, 40, 35, 30, 51, 62, 76, 46, 46, 46, 46, 46,
            46, 50, 56, 67, 106, 83, 95, 112, 137, 137, 76, 103, 122,
            122, 83, 71, 61, 64, 67, 71, 74, 77, 79, 86, 91, 83,
            76, 68, 63, 76, 107, 107, 107, 119, 127, 133, 135, 137, 142,
            148, 152, 152, 107, 137, 104, 91, 99, 120, 152, 152, 137, 168,
            168, 122, 137, 137, 170, 183, 183, 187, 194, 201, 192, 152, 152,
            166, 177, 198, 156, 127, 116, 107, 104, 101, 98, 95, 103, 91,
            97, 102, 107, 107, 107, 103, 98, 94, 91, 105, 122, 122, 122,
            122, 122, 137, 137, 137, 137, 137, 137, 137, 137, 140, 144, 147,
            150, 152, 159]
if p2pTest:
    #================================
    # Example of running in p2p mode
    #================================
    elev = setElevation()
    ModVar = 3 # Broadcast
    deltaH = 91.
    tht_m = 10.0 # Tx height
    rht_m = 10. # Rx height
    TSiteCriteria = 0 # Random
    RSiteCriteria = 0 # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.0
    radio_climate = 5 # Continental Temperate
    pol = 0 # Horizontal (0); NOTE: original comment said 'Vertical', but 0 is Horizontal per point_to_point's docs
    rel = 0.5
    conf = 0.5
    # Must initialize these variables since they are passed to the function
    dbloss = 0
    strmode = ''
    errnum = 0
    # Sweep a grid of reliability/confidence quantiles and print results.
    a_rel = [0.01, 0.1, 0.5, 0.9, 0.99]
    a_conf = [0.5, 0.9, 0.1]
    for rel in a_rel:
        for conf in a_conf:
            dbloss, strmode, errnum = \
                point_to_point(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                               eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                               dbloss, strmode, errnum)
            print rel, conf, dbloss, strmode, errnum
if p2pMDHtest:
    # Example of running in p2p MDH mode (time/location/confidence quantiles).
    elev = setElevation()
    ModVar = 3 # Broadcast
    deltaH = 0.
    tht_m = 10. # Tx height
    rht_m = 10. # Rx height
    TSiteCriteria = 0 # Random
    RSiteCriteria = 0 # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.
    radio_climate = 5 # Continental Temperate
    pol = 1 # Vertical
    timepct = 0.5
    locpct = 0.5
    confpct = 0.5
    # Initialize before using in function call
    dbloss = propmode = deltaH = errnum = 0
    dbloss, propmode, deltaH, errnum = \
        point_to_pointMDH(elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                          eno_ns_surfref, frq_mhz, radio_climate, pol, timepct,
                          locpct, confpct, dbloss, propmode, deltaH, errnum)
    print dbloss, propmode, deltaH, errnum
if p2pDHtest:
    # Example of running in p2p DH mode (also reports deltaH).
    elev = setElevation()
    ModVar = 3 # Broadcast
    deltaH = 0.
    tht_m = 10. # Tx height
    rht_m = 10. # Rx height
    TSiteCriteria = 0 # Random
    RSiteCriteria = 0 # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.
    radio_climate = 5 # Continental Temperate
    pol = 1 # Vertical
    conf = 0.5
    rel = 0.5
    # Initialize before using in function call
    dbloss = deltaH = errnum = 0
    strmode = ''
    dbloss, deltaH, errnum, strmode = \
        point_to_pointDH (elev, tht_m, rht_m, eps_dielect, sgm_conductivity,
                          eno_ns_surfref, frq_mhz, radio_climate, pol, conf, rel,
                          dbloss, deltaH, errnum)
    print dbloss, deltaH, errnum, strmode
if areaTest:
    #================================
    # Example of running in area mode
    #================================
    ModVar = 3 # Broadcast
    deltaH = 0.
    tht_m = 10. # Tx height
    rht_m = 10. # Rx height
    TSiteCriteria = 0 # Random
    RSiteCriteria = 0 # Random
    eps_dielect = 15
    sgm_conductivity = 0.005
    eno_ns_surfref = 301
    frq_mhz = 3500.
    radio_climate = 5 # Continental Temperate
    pol = 1 # Vertical
    pctTime = 0.5
    pctLoc = 0.5
    pctConf = 0.5
    # Sweep distance 10..100 km and print the predicted loss (temp[0]).
    for dist_km in range(10, 101):
        temp = ITMAreadBLoss(ModVar, deltaH, tht_m, rht_m, dist_km, TSiteCriteria,
                             RSiteCriteria, eps_dielect, sgm_conductivity, eno_ns_surfref,
                             frq_mhz, radio_climate, pol, pctTime, pctLoc, pctConf)
        print dist_km, temp[0]
#==================================
| {
"repo_name": "krlinus/Spectrum-Access-System",
"path": "src/prop_current/itm.py",
"copies": "1",
"size": "49748",
"license": "apache-2.0",
"hash": 1628990296415618300,
"line_mean": 29.5953259533,
"line_max": 106,
"alpha_frac": 0.5178298625,
"autogenerated": false,
"ratio": 2.703108019995653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3720937882495653,
"avg_score": null,
"num_lines": null
} |
# A Python port of the MS knowledge base article Q157234
# "How to deal with localized and renamed user and group names"
# http://support.microsoft.com/default.aspx?kbid=157234
import sys
from win32net import NetUserModalsGet
from win32security import LookupAccountSid
import pywintypes
from ntsecuritycon import *
def LookupAliasFromRid(TargetComputer, Rid):
    """Resolve a well-known BUILTIN alias RID to its (localized) name.

    The BUILTIN domain SID is the same on every machine, so it is
    constructed directly as S-1-5-32-<Rid> and looked up on the target.
    """
    sid = pywintypes.SID()
    sid.Initialize(SECURITY_NT_AUTHORITY, 2)
    sid.SetSubAuthority(0, SECURITY_BUILTIN_DOMAIN_RID)
    sid.SetSubAuthority(1, Rid)
    name, _domain, _typ = LookupAccountSid(TargetComputer, sid)
    return name
def LookupUserGroupFromRid(TargetComputer, Rid):
    """Resolve an account-domain RID to its (localized) name on the target.

    Fetches the machine's account-domain SID, appends the RID as an extra
    sub-authority, and looks the resulting SID up.
    """
    # The account-domain SID is per-machine; when resolving many RIDs on
    # the same machine this call only needs to happen once.
    modals = NetUserModalsGet(TargetComputer, 2)
    domain_sid = modals['domain_id']
    count = domain_sid.GetSubAuthorityCount()
    # New SID = account-domain SID with room for one more sub-authority.
    sid = pywintypes.SID()
    sid.Initialize(domain_sid.GetSidIdentifierAuthority(), count + 1)
    # Copy the existing sub-authorities, then append the RID.
    for idx in range(count):
        sid.SetSubAuthority(idx, domain_sid.GetSubAuthority(idx))
    sid.SetSubAuthority(count, Rid)
    name, _domain, _typ = LookupAccountSid(TargetComputer, sid)
    return name
def main():
    """Print the localized Administrator user and Administrators group
    names for the machine named in argv[1], or the local machine."""
    targetComputer = sys.argv[1] if len(sys.argv) == 2 else None
    name = LookupUserGroupFromRid(targetComputer, DOMAIN_USER_RID_ADMIN)
    print("'Administrator' user name = %s" % (name,))
    name = LookupAliasFromRid(targetComputer, DOMAIN_ALIAS_RID_ADMINS)
    print("'Administrators' local group/alias name = %s" % (name,))
# Standard script entry point.
if __name__=='__main__':
    main()
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/win32/Demos/security/localized_names.py",
"copies": "6",
"size": "2032",
"license": "mit",
"hash": 4555211805288631000,
"line_mean": 32.3114754098,
"line_max": 72,
"alpha_frac": 0.7057086614,
"autogenerated": false,
"ratio": 3.533913043478261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7239621704878261,
"avg_score": null,
"num_lines": null
} |
"""A python program to check the github org file.
The purpose of this file is to guard against bad changes (e.g. adding someone)
as an admin who shouldn't be an admin.
"""
import fire
import logging
import yaml
class CheckConfig(object):
  """Validates the GitHub org YAML config.

  Guards against bad changes, e.g. adding someone as an admin who
  shouldn't be an admin.
  """

  def check_config(self, config):
    """Check that the config is valid.

    (The self parameter was previously misspelled 'seld'; positional
    callers are unaffected by fixing the name.)

    Args:
      config: Path to YAML file

    Raises:
      ValueError: If orgs.kubeflow.admins is missing, or contains a user
        that is not in the allowed whitelist.
    """
    with open(config) as hf:
      # safe_load: yaml.load without an explicit Loader is deprecated and
      # can construct arbitrary Python objects from the input.
      org = yaml.safe_load(hf)
    admins = org.get("orgs").get("kubeflow").get("admins")
    # There should be at least some admins
    if not admins:
      error = "config {0} is not valid; missing orgs.kubeflow.admins".format(
        config)
      logging.error(error)
      raise ValueError(error)
    # TODO(jlewi): We should load this in via config map
    # Check that each admin is in a whitelist set of admins.
    allowed_admins = ["Bobgy", "chensun", "google-admin", "googlebot",
                      "google-oss-robot", "james-jwu", "jlewi", "k8s-ci-robot",
                      "theadactyl", "zijianjoy"]
    for a in admins:
      if a not in allowed_admins:
        error = ("{0} is not in the allowed set of admins. "
                 "Allowed admins is {1}").format(a, ", ".join(allowed_admins))
        logging.error(error)
        raise ValueError(error)
    logging.info("config is valid")
# Script entry point: configure logging, then expose CheckConfig through
# python-fire so `check-config <path>` can be invoked from the CLI.
if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(message)s|%(pathname)s|%(lineno)d|'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  fire.Fire(CheckConfig)
| {
"repo_name": "kubeflow/internal-acls",
"path": "github-orgs/manifests/validate_config.py",
"copies": "1",
"size": "1599",
"license": "apache-2.0",
"hash": 4540236344257859600,
"line_mean": 29.75,
"line_max": 79,
"alpha_frac": 0.5847404628,
"autogenerated": false,
"ratio": 3.6506849315068495,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47354253943068497,
"avg_score": null,
"num_lines": null
} |
""" A python script similar to the c_rehash script from the openssl package """
### ###
### Usage: ###
### python c_rehash.py --clean --ca-path /etc/ssl/certs ###
### python c_rehash.py --ca-path /etc/ssl/certs --openssl-path /bin/openssl ###
### ###
import argparse
import glob
import io
import logging
import mmap
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
from subprocess import CalledProcessError
def symlink(src, dst):
    """Make *dst* reference *src*: a real symlink on POSIX systems, a
    file copy on Windows (where symlinks need special privileges)."""
    on_windows = platform.system() == "Windows"
    try:
        if not on_windows:
            logging.debug("symlink: not windows, symlinking")
            os.symlink(src, dst)
        else:
            logging.debug("symlink: windows, using cp")
            shutil.copy2(src, dst)
    except OSError:
        logging.exception("Error creating the hash file: " + dst)
def runcmd(commandline):
    """Run a shell-style command line and return its raw output.

    Returns None (after logging the failure) if the command exits with a
    non-zero status.
    """
    try:
        argv = shlex.split(commandline)
        logging.debug("runcmd: commandline: " + commandline)
        logging.debug("runcmd: lexout: " + str(argv))
        return subprocess.check_output(argv)
    except CalledProcessError:
        logging.exception("Command Error")
def isfilex(path):
    """Return True if *path* is a regular file and is executable."""
    logging.debug("isfilex: path: " + path)
    executable = os.access(path, os.X_OK)
    return os.path.isfile(path) and executable
def isvalidcertificate(fname):
    """Return True if *fname* contains a PEM certificate header and footer.

    Fixes over the original:
    - byte literals for mmap.find (on Python 3, mmap.find requires bytes;
      bytes literals behave identically on Python 2),
    - the file and mmap are closed instead of leaked,
    - read-only open ("rb" instead of "r+"), which also works for
      read-only files,
    - an empty file returns False instead of crashing mmap.
    """
    logging.debug("isvalidcertificate: fname: " + fname)
    if os.path.getsize(fname) == 0:
        # mmap cannot map an empty file; trivially not a certificate.
        logging.debug("isvalidcertificate: invalid certificate")
        return False
    with io.open(fname, "rb") as infile:
        buf = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            valid = (buf.find(b'-----BEGIN CERTIFICATE-----') != -1 and
                     buf.find(b'-----END CERTIFICATE-----') != -1)
        finally:
            buf.close()
    if valid:
        logging.debug("isvalidcertificate: valid certificate")
        return True
    logging.debug("isvalidcertificate: invalid certificate")
    return False
def isvalidcrl(fname):
    """Return True if *fname* contains an X509 CRL header and footer.

    Fixes over the original:
    - byte literals for mmap.find (on Python 3, mmap.find requires bytes;
      bytes literals behave identically on Python 2),
    - the file and mmap are closed instead of leaked,
    - read-only open ("rb" instead of "r+"), which also works for
      read-only files,
    - an empty file returns False instead of crashing mmap.
    """
    logging.debug("isvalidcrl: fname: " + fname)
    if os.path.getsize(fname) == 0:
        # mmap cannot map an empty file; trivially not a CRL.
        logging.debug("isvalidcrl: invalid crl")
        return False
    with io.open(fname, "rb") as infile:
        buf = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            valid = (buf.find(b'-----BEGIN X509 CRL-----') != -1 and
                     buf.find(b'-----END X509 CRL-----') != -1)
        finally:
            buf.close()
    if valid:
        logging.debug("isvalidcrl: valid crl")
        return True
    logging.debug("isvalidcrl: invalid crl")
    return False
def rehash(inargs, root, fname, path, iscertificate):
    """Create the <hash>.N (or <hash>.rN for CRLs) symlink for one file.

    Scans existing <hash>.* entries in the directory for collisions,
    skips files whose fingerprint already has a link, and otherwise
    links under the next free suffix.

    Fixes over the original:
    - collision glob searched under ``path`` (the file itself) instead of
      the containing directory ``root``, so collisions were never found,
    - the collision loop fingerprinted ``path`` instead of the existing
      ``exfile``, so every existing hash file looked like a duplicate,
    - CRL suffixes ('.rN') crashed int(); the 'r' is now stripped, and
      max() is used so the highest existing suffix wins regardless of
      the lexical sort order of the glob results.

    NOTE(review): on Python 3, str(check_output_bytes) yields "b'...'";
    this module is Python 2 code and the str()/strip() handling below is
    kept as-is — confirm before running under Python 3.
    """
    cparam = 'x509' if iscertificate else 'crl'
    chash = runcmd(inargs.opath +
                   ' %s -hash -noout -in %s' % (cparam, path))
    chash = str(chash).strip()
    logging.debug("rehash: chash: " + chash)
    maxsuffix = -1
    duplicatefile = False
    # Look for existing links with the same hash in the directory (root).
    hashfiles = glob.glob(os.path.join(root, chash + '*'))
    if hashfiles:
        # Process hash collisions here.
        hashfiles.sort()
        logging.debug("rehash: collisions: " + str(hashfiles))
        cfprint = runcmd(inargs.opath +
                         ' %s -fingerprint -noout -in %s' % (cparam, path))
        cfprint = str(cfprint).strip()
        logging.debug("rehash: cfprint: " + cfprint)
        for exfile in hashfiles:
            # Fingerprint the *existing* hash file, not the candidate.
            efprint = runcmd(inargs.opath +
                             ' %s -fingerprint -noout -in %s' % (cparam, exfile))
            efprint = str(efprint).strip()
            logging.debug("rehash: efprint: " + efprint)
            if cfprint == efprint:
                logging.debug("rehash: duplicate certificate")
                duplicatefile = True
                break
            else:
                # Suffix is '.N' for certs or '.rN' for CRLs.
                suffix = int(os.path.splitext(exfile)[1][1:].lstrip('r'))
                maxsuffix = max(maxsuffix, suffix)
    maxsuffix = maxsuffix + 1
    if not iscertificate:
        # The symlinks to CRL files are of the format <hash>.rX.
        maxsuffix = 'r' + str(maxsuffix)
    if not duplicatefile:
        # ln -s <path> <hash>.<suffix>
        src = fname
        dst = os.path.join(root, chash) + '.' + str(maxsuffix)
        logging.debug("creating symlink src: " + src + " dst: " + dst)
        symlink(src, dst)
def walkdirectory(inargs):
    """ walk through the cpath and perform the required actions """
    # Matches hash-link names: hex digits, a dot, optional 'r', digits
    # (i.e. the <hash>.N / <hash>.rN files this tool itself creates).
    hexpattern = re.compile(r'^[\da-f]+\.r{0,1}\d+$')
    for root, _, files in os.walk(inargs.cpath):
        for fname in files:
            path = os.path.join(root, fname)
            logging.info("processing " + path + "...")
            hexfname = re.search(hexpattern, fname)
            if inargs.clean == True:
                # if cleanup is specified, unlink the <hash>.[r]X files
                if (os.path.islink(path) or hexfname):
                    logging.debug("walkdirectory: unlinking " + path)
                    os.unlink(path)
                continue
            elif (isvalidcertificate(path) and not hexfname):
                rehash(inargs, root, fname, path, True)
            elif (isvalidcrl(path) and not hexfname):
                rehash(inargs, root, fname, path, False)
def main():
    """Parse and validate command-line arguments, then rehash the CA path.

    Fixes over the original:
    - the opath error message interpolated args.cpath instead of args.opath,
    - the cpath check ('not exists and not isdir') could never fire for an
      existing non-directory path; os.path.isdir alone covers both cases,
    - logging.error (not logging.exception) outside an except block,
    - lazy %-style logging for the version string (also avoids str/bytes
      concatenation issues on Python 3).
    """
    parser = argparse.ArgumentParser(description="""Run through the root
                                                 certificates and CRLs in the
                                                 CApath and create the
                                                 corresponding <hash>.[r]X
                                                 symlinks.""")
    parser.add_argument('--openssl-path',
                        dest='opath',
                        required=True,
                        help='path to the openssl binary to use',
                        metavar='/path/to/openssl')
    parser.add_argument('--ca-path',
                        dest='cpath',
                        required=True,
                        help='path to the root certificates and CRLs',
                        metavar='/path/to/rootcertificates')
    parser.add_argument("--clean",
                        dest="clean",
                        action='store_true',
                        help="when specified unlinks existing symlinks")
    args = parser.parse_args()
    #
    # validate the input
    #
    logging.debug("opath: " + args.opath)
    logging.debug("cpath: " + args.cpath)
    logging.debug("clean: " + str(args.clean))
    if not isfilex(args.opath):
        errmsg = "Input Error: %s is not a file or executable" % args.opath
        logging.error(errmsg)
        sys.exit(1)
    if not os.path.isdir(args.cpath):
        errmsg = "Input Error: %s does not exist or not a directory" % args.cpath
        logging.error(errmsg)
        sys.exit(1)
    versionstr = runcmd(args.opath + " version")
    logging.info("using %s", versionstr)
    walkdirectory(args)
# Script entry point: run main() and exit non-zero on any unhandled error.
if __name__ == "__main__":
    try:
        logging.basicConfig(level=logging.INFO)
        main()
    except Exception:
        # 'except Exception, ex' is Python-2-only syntax and 'ex' was
        # unused; this form works on both Python 2 and 3, and
        # logging.exception records the full traceback.
        logging.exception("Unhandled Exception")
        sys.exit(1)
| {
"repo_name": "funcpointer/c_rehash.py",
"path": "c_rehash.py",
"copies": "1",
"size": "7285",
"license": "mit",
"hash": 1583515238473610800,
"line_mean": 31.2345132743,
"line_max": 80,
"alpha_frac": 0.5433081675,
"autogenerated": false,
"ratio": 4.094997189432266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005631956082295251,
"num_lines": 226
} |
"""A Python script to analyze NIRSPEC data. Eventually it should be
a function (e.g. FGD's ultimate_automation), but for now it's just a
script.
This routine takes a set of NIRSPEC high-resolution data files and
uses IRAF and homebrew Python to extract meaningful spectral information.
Other routines will be used for manipulation of the data.
This will probably only run on Unix/Linux/Mac OSX platforms.
This to check on a new system:
1) You may want need to edit the default PYFITS.WRITETO function to
add an 'output_verify' keyword.
2) PyRAF's "apnormalize" routines contain a parameter file called
"apnorm1.par", which contains references to "apnorm.background" --
these must all be changed to "apnormalize.background"
Other notes:
1) If the spectral tracing keeps crashing ("Trace of aperture N lost
at line X"), try fiddling with the minsep/maxsep parameters.
2008-06-10 21:28 IJC: Created.
2008-07-22 15:04 IJC: Split up "procData" into "procCal" and "procTarg"
2008-07-25 16:19 IJC: Finished initial version; renamed ns_reduce
2008-11-25 15:29 IJC: Added fix_quadnoise step and individual frame
cosmic ray rejection.
2008-12-05 15:48 IJC: Switched to linear wavelength interpolation,
since this will simplify things for LSD
2008-12-16 17:12 IJC: Trying it for a second dataset
2009-04-28 10:03 IJC: Beginning to add the L-band data interface.
Updated interface to make better use of
nsdata.initobs. Flat field is now padded on
both sides for order-tracing.
2009-07-09 17:32 IJC: Switched pyfits.writeto calls to use 'output_verify=ignore'
2010-09-06 10:43 IJC: Added H-filter option to horizsamp; added
cleanec option to preprocess calls.
2012-04-04 15:05 IJMC: E puor si muove! Added flat_threshold option;
subtly changed a few options to new
defaults. Set shift=0 in calls to ecreidenfity.
2014-12-17 14:17 IJMC: Added new troubleshooting & alternative
flat-normalization approaches, since PyRAF's
apnormalize continues to give me trouble.
2016-10-15 02:09 IJMC: Trying it for ARIES. This script has been
around for a little while!
2016-10-18 13:50 IJMC: Now apply ARIES quad-detector crosstalk correction
2017-10-11 16:41 IJMC: Handing this off to Nicholas Mehrle. Good luck!
"""
import os, sys, shutil
from pyraf import iraf as ir
from scipy import interpolate, isnan, isinf
try:
from astropy.io import fits as pyfits
except:
import pyfits
import nsdata as ns
import spec
import numpy as ny
from pylab import find
import pdb
### Define some startup variables that could go in a GUI someday
data = '2016oct15' # GX And
data = '2016oct15b' # WASP-33
data = '2016oct19' # WASP-33
data = '2016oct20b' # WASP-33
local = True
makeDark = True
makeFlat = True
makeMask = True
processCal = True
processTarg = True
verbose = True
interactive = True
dispersion = 0.075 # Resampled dispersion, in angstroms per pixel (approximate)
flat_threshold = 500
dir0 = os.getcwd()
if local:
_iraf = ns._home + "/iraf/"
else:
_iraf = ns._home + "/atwork/iraf/"
# Eventually, get all initializations from initobs:
print data
obs = ns.initobs(data, remote=(not local))
_proc = obs[1]
_raw = obs[8]
n_ap = obs[14] # number of apertures (i.e., echelle orders)
filter = obs[15] # photometric band in which we're operating
prefn = obs[16] # filename prefix
calnod = obs[17] # whether A0V calibrators nod, or not
procData = processCal or processTarg
badval = 0
# Register local IRAF CL scripts and load the packages needed for
# echelle reduction (order finding, tracing, extraction).
ir.task(bfixpix = _iraf+"bfixpix.cl")
ir.task(bfixpix_one = _iraf+"bfixpix_one.cl")
#ir.load('fitsutil')
ir.load('noao')
ir.load('astutil')
ir.load("imred")
ir.load('echelle')
ir.load('twodspec')
ir.load('apextract')
# High-resolution telluric line list for the current band, used later
# by ecidentify as the wavelength-calibration coordinate list.
telluric_list = ns._home + '/proj/pcsa/data/atmo/telluric_hr_' + filter + '.dat'
# Horizontal (dispersion-direction) sample regions per band, chosen to
# avoid bad detector columns / low-signal regions.
if filter=='K' or filter=='H':
    horizsamp = "10:500 550:995"
elif filter=='L':
    horizsamp = "10:270 440:500 550:980"
elif filter=='Karies':
    horizsamp = "10:995"
# Instrument-specific header keywords and tracing parameters:
# 'Karies' = ARIES at FLWO; everything else assumes NIRSPEC at Keck.
if filter=='Karies':
    observ = 'flwo'
    itime = 'exptime'
    date = 'UTSTART'
    time = None
    dofix = True
    t_width = 15.
    trace_step = 10
    trace_order = 3
    quadcorrect = True # Correct for detector crosstalk
else:
    observ = 'keck'
    itime = 'itime'
    date = 'date-obs'
    time = 'UTC'
    dofix = True
    t_width = 115.
    trace_step = 50
    trace_order = 7
    quadcorrect = False # Correct for detector crosstalk
# Per-band cleaning parameters (echelle cleaning, cosmic-ray rejection,
# quadrant fixing, and their sigma/threshold/ratio tuning values).
if filter=='K':
    cleanec = True
    cleancr = False
    qfix = True
    csigma=25
    cthreshold=400
    rratio = 5
    rthreshold = 300
elif filter=='H':
    cleanec = False
    cleancr = True
    csigma=30
    cthreshold=900
    qfix = False
    rratio = 5
    rthreshold = 300
elif filter=='L':
    cleanec = True
    cleancr = False
    qfix = True
    csigma=25
    cthreshold=400
    rratio = 5
    rthreshold = 300
elif filter=='Karies':
    cleanec = True
    cleancr = False
    qfix = 'aries'
    csigma=25
    cthreshold=400
    rratio = 5
    rthreshold = 300
else:
    # NOTE(review): for any other filter value only qfix is defined here;
    # cleanec/cleancr/csigma/etc. would raise NameError downstream -- confirm
    # all supported bands are covered above.
    qfix = True
# Background-fit sample region, function, and order for sky subtraction.
bsamp = "-18:-10,10:18"
bfunc = 'chebyshev'
bord = 3 # background subtraction function order
# Path to the IDL executable (used to launch xtellcor_general later).
idlexec = os.popen('which idl').read().strip()
postfn = ".fits"
maskfn = ".pl"
# Filenames for the combined calibration products and pixel masks.
_sflat = _proc + prefn + "_flat"
_sflats = _proc + prefn + "_flat_sig"
_sflatdc = _proc + prefn + "_flatd"
_sflatdcn = _proc + prefn + "_flatdn"
_sdark = _proc + prefn + "_dark"
_sdarks = _proc + prefn + "_dark_sig"
_mask1 = _proc + prefn + "_badpixelmask1" + maskfn
_mask2 = _proc + prefn + "_badpixelmask2" + maskfn
_mask3 = _proc + prefn + "_badpixelmask3" + maskfn
_mask = _proc + prefn + "_badpixelmask" + maskfn
_fmask = _proc + prefn + "_flatpixelmask" + maskfn
_dmask = _proc + prefn + "_darkpixelmask" + postfn
_wldat = 'ec'
# Write the raw/processed frame lists to disk as IRAF "@file" lists.
rawdark = ns.strl2f(_proc+'rawdark', obs[9], clobber=True)
rawflat_list = obs[10] #ns.strl2f(_proc+'rawflat', obs[10], clobber=True)
procflat_list = [el.replace(_raw, _proc) for el in obs[10]]
procflat = ns.strl2f(_proc+'procflat', procflat_list, clobber=True)
rawcal = ns.strl2f(_proc+'rawcal', obs[11][0], clobber=True)
proccal = ns.strl2f(_proc+'proccal', obs[11][1], clobber=True)
rawtarg = ns.strl2f(_proc+'rawtarg', obs[12][0], clobber=True)
proctarg = ns.strl2f(_proc+'proctarg', obs[12][1], clobber=True)
speccal = ns.strl2f(_proc+'speccal', obs[13][0], clobber=True)
spectarg = ns.strl2f(_proc+'spectarg', obs[13][1], clobber=True)
meancal = prefn + 'avgcal'
# Reset IRAF task parameters to their defaults before configuring them.
ir.unlearn('ccdproc')
ir.unlearn('imcombine')
ir.unlearn('echelle')
# Set parameters for aperture tracing, flat-field normalizing, etc.
ir.apextract.dispaxis = 1
ir.echelle.dispaxis = 1
ir.echelle.apedit.width = t_width
ir.echelle.apfind.minsep = 10.
ir.echelle.apfind.maxsep = 150.
ir.echelle.apfind.nfind = n_ap
ir.echelle.apfind.recenter = "Yes"
ir.echelle.apfind.nsum = -3
ir.apall.ylevel = "INDEF" #0.05
ir.apall.bkg = "Yes"
ir.apall.ulimit = 2
ir.apall.llimit = -2
ir.aptrace.order = trace_order
ir.aptrace.niterate = 3
ir.aptrace.step = trace_step
ir.aptrace.naverage = 1
ir.aptrace.nlost = 999
ir.aptrace.recenter = "yes"
# Set detector properties:
gain = 4.0 # photons (i.e., electrons) per data unit
readnoise = 10.0 # photons (i.e., electrons)
ir.imcombine.gain = gain
ir.imcombine.rdnoise = readnoise
ir.apall.gain = gain
ir.apall.readnoise = readnoise
ir.apnormalize.gain = gain
ir.apnormalize.readnoise = readnoise
ir.set(observatory=observ)
# Combine dark frames into a single dark frame:
# avsigclip rejection discards outlier pixels; the per-pixel scatter is
# saved to _sdarks as a sigma image.
if makeDark:
    ir.imdelete(_sdark)
    ir.imdelete(_sdarks)
    print "rawdark file list>>" + rawdark
    ir.imcombine("@"+rawdark, output=_sdark, combine="average",reject="avsigclip", sigmas=_sdarks, scale="none", weight="none", bpmasks="")
    ns.write_exptime(_sdark, itime=itime)
if makeFlat: # 2008-06-04 09:21 IJC: dark-correct flats; then create super-flat
ir.imdelete(_sflat)
##ir.imdelete(_sflats)
ir.imdelete(_sflatdc)
#ns.correct_aries_crosstalk("@"+_proc +'rawflat', output='@'+_proc + 'procflat')
ns.correct_aries_crosstalk(rawflat_list, output=procflat_list)
# 2008-06-04 08:42 IJC: Scale and combine the flats appropriately (as lamp is warming up, flux changes)
ir.imcombine("@"+procflat, output=_sflat, combine="average",reject="crreject", scale="median", weight="median", bpmasks="") # sigmas=_sflats
ns.write_exptime(_sflat, itime=itime)
print _sflat, _sdark
ir.ccdproc(_sflat, output=_sflatdc, ccdtype="", fixpix="no", overscan="no",trim="no",zerocor="no",darkcor="yes",flatcor="no", dark=_sdark)
if verbose: print "Done making flat frame!"
ir.imdelete(_sflatdcn)
ir.imdelete(_sflatdcn+'big')
# ------------------------------------
# Do some FITS-file gymnastics to allow all 6 orders to be traced
# ------------------------------------
flatdat = pyfits.getdata( _sflatdc+postfn)
flathdr = pyfits.getheader(_sflatdc+postfn)
n_big = 1400
pad = (n_big-1024)/2
bigflat = ny.zeros([n_big,1024])
bigflat[pad:(pad+1024),:] = flatdat
pyfits.writeto(_sflatdc+'big'+postfn, bigflat, flathdr, clobber=True, output_verify='warn')
# Create normalized flat frame (remove continuum lamp profile)
# ir.apnorm1.background = ")apnormalize.background"
# ir.apnorm1.skybox = ")apnormalize.skybox"
# ir.apnorm1.weights = ")apnormalize.weights"
# ir.apnorm1.pfit = ")apnormalize.pfit"
# ir.apnorm1.saturation = ")apnormalize.saturation"
# ir.apnorm1.readnoise = ")apnormalize.readnoise"
# ir.apnorm1.gain = ")apnormalize.gain"
# ir.apnorm1.lsigma = ")apnormalize.lsigma"
# ir.apnorm1.usigma = ")apnormalize.usigma"
# ir.apnorm1.clean = ")apnormalize.clean"
if True: # the old, IRAF way:
#ir.apnormalize(_sflatdc+'big', _sflatdcn+'big', sample=horizsamp, niterate=1, threshold=flat_threshold, function="spline3", pfit = "fit1d", clean='yes', cennorm='no', recenter='yes', resize='yes', edit='yes', trace='yes', weights='variance', fittrace='yes', interactive=interactive, background='fit', order=3)
ir.apflatten(_sflatdc+'big', _sflatdcn+'big', sample=horizsamp, niterate=1, threshold=flat_threshold, function="spline3", pfit = "fit1d", clean='yes', recenter='yes', resize='yes', edit='yes', trace='yes', fittrace='yes', interactive=interactive, order=3)
else:
mudflat = pyfits.getdata(_sflatdc + 'big.fits')
mudhdr = pyfits.getheader(_sflatdc + 'big.fits')
trace = spec.traceorders(_sflatdc + 'big.fits', pord=2, nord=ir.aptrace.order, g=gain, rn=readnoise, fitwidth=100)
normflat = spec.normalizeSpecFlat(mudflat*gain, nspec=ir.aptrace.order, traces=trace)
pyfits.writeto(_sflatdcn + 'big.fits', normflat, header=mudhdr, output_verify='warn')
normflatdat = pyfits.getdata( _sflatdcn+'big'+postfn)
normflathdr = pyfits.getheader(_sflatdcn+'big'+postfn)
smallnormflat = normflatdat[pad:(pad+1024),:]
smallnormflat[smallnormflat==0] = 1.
pyfits.writeto(_sflatdcn+postfn, smallnormflat, normflathdr, clobber=True, output_verify='warn')
if verbose: print "Done making dark frame!"
# Build the master bad-pixel mask by combining (1) hot/cold pixels found
# in the dark-corrected flat (both polarities, via cleanec on the flat
# and on its negation) and (2) highly variable pixels from the darks.
if makeMask:
    if verbose:
        print "Beginning to make bad pixel masks..."
    # iterate through the superflat 3 times to get bad pixels, then
    # construct a super-bad pixel map.
    ir.load('crutil')
    # Remove any stale masks (both the .pl and .fits flavors) from a
    # previous run before regenerating them.
    ir.imdelete(_mask)
    ir.imdelete(_fmask)
    ir.imdelete(_dmask)
    ir.imdelete(_mask.replace(maskfn, postfn))
    ir.imdelete(_fmask.replace(maskfn, postfn))
    ir.imdelete(_dmask.replace(postfn, maskfn))
    ir.delete('blah.fits')
    ir.delete('blahneg.fits')
    #ir.cosmicrays(_sflatdc, 'blah', crmasks=_mask1, threshold=750, npasses=7q
    #              , \
    #                  interactive=False) #interactive)
    # Positive-outlier pass over the flat:
    ns.cleanec(_sflatdc, 'blah', npasses=5, clobber=True, badmask=_mask1.replace(maskfn, postfn))
    #ir.imcopy(_mask1, _mask1.replace(maskfn, postfn))
    #pyfits.writeto(_mask1, ny.zeros(pyfits.getdata(_sflatdc+postfn).shape, dtype=int), clobber=True)
    # Negative-outlier pass: negate the flat and clean again.
    pyfits.writeto(_sflatdc+'neg', 0. - pyfits.getdata(_sflatdc+postfn), clobber=True)
    #ir.cosmicrays(_sflatdc+'neg', 'blahneg', crmasks=_mask2, threshold=750, npasses=7) #, \
    #                  interactive=interactive)
    ns.cleanec(_sflatdc+'neg', 'blahneg', npasses=5, clobber=True, badmask=_mask2.replace(maskfn, postfn))
    #pyfits.writeto(_mask2, ny.zeros(pyfits.getdata(_sflatdc+postfn).shape, dtype=int), clobber=True)
    # create a final binary mask from the 2 masks:
    #ir.imcalc(_mask1+","+_mask2, _fmask, "im1||im2")
    pyfits.writeto(_fmask.replace(maskfn, postfn), ny.logical_or(pyfits.getdata(_mask1.replace(maskfn, postfn)), pyfits.getdata(_mask2.replace(maskfn, postfn))).astype(int), clobber=True)
    #ir.imcopy(_fmask.replace(maskfn, postfn), _fmask)
    # clean up after myself:
    ir.imdelete(_mask1+','+_mask2+','+_sflatdc+'neg,blah,blahneg')
    # Examine the dark frames for highly variable pixels:
    ns.darkbpmap(obs[9], clipsigma=5, sigma=10, writeto=_dmask, clobber=True, verbose=verbose, outtype=float)
    #pyfits.writeto(_dmask, ny.zeros(pyfits.getdata(_sflatdc+postfn).shape, dtype=int), clobber=True)
    try:
        ir.imcopy(_dmask, _dmask.replace(postfn, maskfn))
    except:
        # Best-effort: the .pl copy is only a convenience for IRAF tasks.
        print "couldn't imcopy " + _dmask
    # Combine the flat-field- and dark-frame-derived pixel masks:
    #ir.imcalc(_fmask+","+_dmask, _mask, "im1||im2")
    pyfits.writeto(_mask.replace(maskfn, postfn), ny.logical_or(pyfits.getdata(_fmask.replace(maskfn, postfn)), pyfits.getdata(_dmask)).astype(float), clobber=True)
    ir.imcopy(_mask.replace(maskfn, postfn), _mask)
    if verbose: print "Done making bad pixel mask!"
# All science-frame processing happens inside the processed-data
# directory; tell both Python and IRAF to move there.
if procData:
    os.chdir(_proc)
    ir.chdir(_proc)
# Calibrator (A0V standard) processing: clean + flat-field the frames,
# extract echelle spectra, build a mean calibrator spectrum, and derive
# the wavelength solution from telluric absorption lines.
if processCal:
    # Add 'exptime' header to all cal, target, and lamp files:
    ns.write_exptime(rawcal, itime=itime)
    #ns.write_exptime(rawlamp)
    # Correct for bad pixels and normalize all the frames by the flat field
    ir.load('crutil')
    ns.preprocess('@'+rawcal, '@'+proccal, qfix=qfix,
                  qpref='', flat=_sflatdcn, mask=_mask.replace(maskfn, postfn),
                  cleanec=cleanec, clobber=True, verbose=verbose,
                  csigma=csigma, cthreshold=cthreshold,
                  cleancr=cleancr, rthreshold=rthreshold, rratio=rratio, date=date, time=time, dofix=dofix)
    if verbose: print "Done correcting cal frames for bad pixels and flat-fielding!"
    # Extract raw spectral data from the echelle images
    ir.imdelete('@'+speccal)
    ir.apall('@'+proccal, output='@'+speccal, format='echelle', recenter='yes',resize='yes',extras='yes', nfind=n_ap, nsubaps=1, minsep=10, weights='variance', bkg='yes', b_function=bfunc, b_order=bord, b_sample=bsamp, b_naverage=-3, b_niterate=2, t_order=3, t_sample=horizsamp, t_niterate=3, t_naverage=3, background='fit', clean='yes', interactive=interactive, nsum=-10, t_function='chebyshev')
    if verbose: print "Done extracting spectra from cal stars!"
    # Average the calibrator spectra (or just copy the first frame when
    # the calibrators nod, since combining nodded frames is invalid).
    ir.imdelete(meancal)
    if calnod:
        shutil.copyfile(obs[13][0][0]+postfn, meancal+postfn)
    else:
        ir.imcombine('@'+speccal, meancal, combine='average', reject='avsigclip', weight='median')
    # Construct wavelength solution; apply to all observations.
    print "First identify lines in each of SEVERAL ORDERS using 'm'. After this, use 'l' to fit dispersion solution. Maybe then it can find more lines automatically. Then, use 'f' to fit a dispersion function. Then use 'o' and set the order offset to 38 (in standard K-band NIRSPEC mode)"
    sys.stdout.flush()
    ir.ecidentify(meancal, database=_wldat, coordlist=telluric_list, ftype='absorption', fwidth='10', niterate=3, low=5, high=5, xorder=3, yorder=3)
    disp_soln = ns.getdisp(_wldat + os.sep + 'ec' + meancal)
    # [32,37] is the expected echelle-order range; anything else means
    # ecidentify mislabeled the apertures and the fit is suspect.
    if disp_soln[1]==[32,37]:
        w = ns.dispeval(disp_soln[0], disp_soln[1], disp_soln[2], shift=disp_soln[3])
        w = w[::-1]
        hdr = pyfits.getheader(meancal+postfn)
        pyfits.writeto('wmc'+postfn, w, hdr, clobber=True, output_verify='ignore')
    else:
        print 'Wrong aperture order numbers calculated -- fit is suspect. ' + \
              'Press enter to continue.'
        if interactive:
            raw_input()
        w = ns.dispeval(disp_soln[0], disp_soln[1], disp_soln[2], shift=disp_soln[3])
        w = w[::-1]
        hdr = pyfits.getheader(meancal+postfn)
        pyfits.writeto('wmc'+postfn, w, hdr, clobber=True, output_verify='ignore')
    # Build a common, linear wavelength grid and resample the mean
    # calibrator spectrum onto it.
    w_interp = ns.wl_grid(w, dispersion, method='linear')
    #w_interp = w_interp[ny.argsort(w_interp.mean(1))]
    hdr_interp = pyfits.getheader(meancal+postfn)
    pyfits.writeto('winterp'+postfn, w_interp, hdr_interp, clobber=True, output_verify='ignore')
    ns.interp_spec(meancal, w, w_interp, k=3.0, suffix='int', badval=badval, clobber=True)
    # Sample each aperture so that they all have equal pixel widths
    # and equal wavelength coverage:
    ir.ecreidentify('@'+speccal, meancal, database=_wldat, refit='no', cradius=10., shift=0)
    filelist = open(speccal)
    for line in filelist:
        filename = line.strip()
        disp_new = ns.getdisp(_wldat+'/ec' + filename)
        w_new = ns.dispeval(disp_new[0], disp_new[1], disp_new[2], shift=disp_new[3])
        w_new = w_new[::-1]
        ns.interp_spec(filename, w_new, w_interp, k=3.0, suffix='int', badval=badval, clobber=True)
    filelist.close()
##########################################
if processTarg:
ns.write_exptime(rawtarg, itime=itime)
ns.preprocess('@'+rawtarg, '@'+proctarg, qfix=qfix,
qpref='', flat=_sflatdcn, mask=_mask.replace(maskfn, postfn),
cleanec=cleanec, clobber=True, verbose=verbose,
csigma=csigma, cthreshold=cthreshold,
cleancr=cleancr, rthreshold=rthreshold, rratio=rratio, date=date, time=time, dofix=dofix)
if verbose: print "Done correcting targ frames for bad pixels and flat-fielding!"
ir.imdelete('@'+spectarg)
ir.apall('@'+proctarg, output='@'+spectarg, format='echelle', recenter='yes',resize='yes',extras='yes', nfind=n_ap, nsubaps=1, minsep=10, bkg='yes', b_function=bfunc, b_order=bord, b_sample=bsamp, b_naverage=-3, b_niterate=2, t_order=3, t_sample=horizsamp, t_niterate=3, t_naverage=3, background='fit', clean='yes', interactive=interactive, nsum=-10, t_function='chebyshev')
if verbose: print "Done extracting spectra from target stars!"
# Sample each aperture so that they all have equal pixel widths
# and equal logarithmic wavelength coverage:
ir.ecreidentify('@'+spectarg, meancal, database=_wldat, refit='no', shift=0)
disp_soln = ns.getdisp(_wldat + os.sep + 'ec' + meancal)
if disp_soln[1]==[32,37]:
w = ns.dispeval(disp_soln[0], disp_soln[1], disp_soln[2], shift=disp_soln[3])
w = w[::-1]
else:
raw_input('Wrong aperture order numbers calculated -- fit is suspect.'
' Press enter to continue.')
w = ns.dispeval(disp_soln[0], disp_soln[1], disp_soln[2], shift=disp_soln[3])
w = w[::-1]
#w_interp = ns.wl_grid(w, dispersion, method='linear')
w_interp = pyfits.getdata('winterp.fits')
hdr_interp = pyfits.getheader(meancal+postfn)
filelist = open(spectarg)
for line in filelist:
filename = line.strip()
disp_new = ns.getdisp(_wldat+'/ec' + filename)
w_new = ns.dispeval(disp_new[0], disp_new[1], disp_new[2], shift=disp_new[3])
w_new = w_new[::-1]
ns.interp_spec(filename, w_new, w_interp, k=3.0, suffix='int', badval=badval, clobber=True, verbose=verbose)
filelist.close()
# Write target and Mean Standard to text files for telluric correction:
ns.wspectext(filename + 'int', wlsort=True)
ns.wspectext(meancal + 'int', wlsort=True)
print 'Instructions for IDL XTELLCOR:\n'
print 'Std Spectra is: ' + meancal
print 'Obj Spectra is: ' + filename
print 'Units need to be set to Angstroms! Remove the 2.166 um feature. '
print 'Make sure to get the velocity shift correction correctly.'
print 'At the end, make sure you write out both Telluric and A0V files.'
sys.stdout.flush()
os.system('cd ' + _proc + '\n' + idlexec + ' -e xtellcor_general')
# Get telluric filename:
_telluric = ''
while (not os.path.isfile(_telluric)) and _telluric<>'q':
temp = os.listdir('.')
print('\n\nEnter the telluric filename (q to quit); path is unnecessary if\n '
' you saved it in the processed-data directory. Local possibilities:')
for element in temp:
if element.find('tellspec')>-1: print element
_telluric = raw_input('Filename: ')
if _telluric=='q':
pass
else:
# Read telluric file; put in the right format.
objspec_telcor = ny.loadtxt(_telluric.replace('_tellspec', ''))
objspec_raw = ny.loadtxt(filename + 'int.dat')
infile = open(_telluric, 'r')
data = [map(float,line.split()) for line in infile]
infile.close()
n = len(data)
data = ny.array(data).ravel().reshape(n, 3)
telluric = data.transpose().reshape(3, n_ap, n/n_ap)
telluric = telluric[1:3,:,:]
tl_shape = telluric.shape
telluric = telluric.ravel()
nanind = find(isnan(telluric))
infind = find(isinf(telluric))
ind = ny.concatenate((nanind, infind))
telluric[ind] = badval
telluric = telluric.reshape(tl_shape)
telluric2 = objspec_raw[:,1] / objspec_telcor[:,1]
telluric2_err = telluric2 * ny.sqrt((objspec_raw[:,2]/objspec_raw[:,1])**2 + (objspec_telcor[:,2]/objspec_telcor[:,1])**2)
telluric2_err[np.logical_not(np.isfinite(telluric2))] = badval
telluric2[np.logical_not(np.isfinite(telluric2))] = badval
telluric2_err /= np.median(telluric2)
telluric2 /= np.median(telluric2)
invtelluric3 = np.vstack((telluric2, telluric2_err)).reshape(tl_shape)
tel_scalefac = np.median(telluric)
telluric = telluric / tel_scalefac
# Divide all target frames by the telluric corrector:
filelist = open(spectarg)
for line in filelist:
filename = line.strip() + 'int'
hdr = pyfits.getheader(filename + postfn)
data = pyfits.getdata( filename + postfn)
data = data[ [0,-2], ::-1, :]
newdata = ny.zeros(data.shape)
newspec = data[0,:,:] * telluric[0,:,:]
ns_shape = newspec.shape
tempdata = newspec.ravel()
nanind = find(isnan(tempdata))
infind = find(isinf(tempdata))
ind = ny.concatenate((nanind, infind))
tempdata[ind] = badval
newspec = tempdata.reshape(ns_shape)
newerr = newspec * ny.sqrt((data[1,:,:]/data[0,:,:])**2 + (telluric[1,:,:]/telluric[0,:,:])**2)
newdata[0,:,:] = newspec;
newdata[1,:,:] = newerr
hdr.update('TELLURIC', 'Telluric-corrected with file ' + _telluric)
pyfits.writeto(filename + 'tel' + postfn, newdata[:,::-1], header=hdr, clobber=True, output_verify='ignore')
filelist.close()
# Return to the directory the script was launched from.
os.chdir(dir0)
print "... and we're done!"
| {
"repo_name": "iancrossfield/aries_reduce",
"path": "aries_reduce.py",
"copies": "1",
"size": "24269",
"license": "mit",
"hash": 5501377261144136000,
"line_mean": 38.5260586319,
"line_max": 400,
"alpha_frac": 0.6268902715,
"autogenerated": false,
"ratio": 3.1150044923629827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9167022132477536,
"avg_score": 0.014974526277089265,
"num_lines": 614
} |
# A Python script to automate th process of sending multiple e-mails using GMAIL.
# THIS SCRIPT SHOULD BE RUN FROM THE COMMAND LINE.
import os,re,smtplib,getpass
# 'regTester' is a function defined to extract "E-MAIL","SUBJECT","MESSAGE" from the text file named 'gmail.txt' .
# All the E-Mail addresses will be stored in "emailList".
# All the SUBJECT will be stored in "subjectList".
# All the MESSAGEs will be stored in "messagesDICT".
def regTester(flag, mail):
    """Extract one kind of field from the lines of gmail.txt.

    Parameters:
        flag: 0 -> return a list of e-mail addresses ('E-MAIL =' lines),
              1 -> return a list of subjects ('SUBJECT =' lines),
              anything else -> return a dict {index: message body}, where a
              body is the text after 'MESSAGE =' plus all following lines
              up to a '---------' separator or the next 'E-MAIL =' header.
        mail: iterable of text lines (e.g. from file.readlines()).

    Fix vs. original: the bare ``except: continue`` (which silently used an
    AttributeError on ``None.group()`` as its no-match control flow, and
    whose guard ``not m.group()==False`` was always true on a match) is
    replaced with an explicit ``if m is None`` check.
    """
    email_re = re.compile(r'E-MAIL =(\s*)?(.*)\n')
    subject_re = re.compile(r'SUBJECT =(\s*)?(.*)\n')
    message_re = re.compile(r'MESSAGE =(\s*)?(.*\n)')
    emailList = []
    subjectList = []
    messagesDict = {}
    k = 0  # running key for messagesDict
    lines = list(mail)
    for i, line in enumerate(lines):
        if flag == 0:
            m = email_re.search(line)
        elif flag == 1:
            m = subject_re.search(line)
        else:
            m = message_re.search(line)
        if m is None:
            continue
        if flag == 0:
            emailList.append(m.group(2))
        elif flag == 1:
            subjectList.append(m.group(2))
        else:
            # Gather the message body: the matched line's remainder plus
            # every following line until a separator or next header.
            body = [m.group(2)]
            j = i + 1
            while j < len(lines):
                if lines[j].startswith('---------') or lines[j].startswith('E-MAIL ='):
                    break
                body.append(lines[j])
                j += 1
            messagesDict[k] = ''.join(body)
            k += 1
    if flag == 0:
        return emailList
    elif flag == 1:
        return subjectList
    return messagesDict
# "NoOfEmails" stores the total number of e-mails to be sent.
print('Fetching all the e-mails to be sent. . . .\n')
# NOTE(review): the file handle is never closed; a `with` block would be safer.
mailFile=open('gmail.txt','r')
mail=mailFile.readlines()
# Three passes over the same lines: addresses, subjects, then bodies.
emailList=regTester(0,mail)
subjectList=regTester(1,mail)
messagesDict=regTester(2,mail)
NoOfEmails=len(emailList)
print('Total number of e-mails found =%s\n'%NoOfEmails)
# Prompts the user for e-mail ID and password.
# The password will not be displayed when the user is entering it.
print('Enter your e-mail id :=\n')
emailId=str(input())
pswd=getpass.getpass('\nEnter your password ("Dont worry it wont be displayed as you enter") :-\n')
# smtpobj creates an SMTP-over-SSL connection used for sending the e-mails.
smtpobj=smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtpobj.ehlo()
print('\nLogging In . . . .\n')
smtpobj.login(emailId,pswd)
# NOTE(review): the loop variable reuses (and shadows) the name 'mail'
# from the readlines() call above; 'mail' is an int index here.
for mail in range(NoOfEmails):
    print('Sending %s e-mail . . . .\n'%(mail+1))
    smtpobj.sendmail(emailId,emailList[mail],'Subject:%s\n%s'%(subjectList[mail],messagesDict[mail]))
smtpobj.quit()
print('Logging out ! Done !!')
| {
"repo_name": "boudhayan-dev/GMAIL-messenger",
"path": "GMAILmessenger.py",
"copies": "1",
"size": "3056",
"license": "mit",
"hash": 2380571718919490600,
"line_mean": 29.56,
"line_max": 114,
"alpha_frac": 0.5494109948,
"autogenerated": false,
"ratio": 3.5493612078977934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4598772202697793,
"avg_score": null,
"num_lines": null
} |
# A Python script to download and create a pandas dataframe out of IRIS earthquake data
#VARIABLES
# Query window and bounding box for the IRIS FDSN event service.
# Kept as strings because they are concatenated directly into the URL.
start_time = '1980-01-01T00:00:00'
end_time = '1989-12-31'
min_latitude = '30'
max_latitude = '40'
min_longitude = '-100'
max_longitude = '-90'
import urllib2
import xmltodict
import pandas as pd
# Build the FDSN event-service query URL and fetch the QuakeML response.
# (urllib2 => this is a Python 2 script.)
url = 'http://service.iris.edu/fdsnws/event/1/query?starttime=' + start_time +'&endtime=' + end_time
url += '&minlatitude='+min_latitude+'&maxlatitude='+max_latitude+'&minlongitude='+min_longitude+'&maxlongitude='+max_longitude
xmlresponse = urllib2.urlopen(url).read()
# Parse the QuakeML XML into nested OrderedDicts.
quakedict = xmltodict.parse(xmlresponse)
def _nested_get(mapping, keys, default=''):
    """Walk nested dict keys; return ``default`` if any level is missing
    or not subscriptable (replaces a dozen copy-pasted bare try/excepts)."""
    try:
        for key in keys:
            mapping = mapping[key]
        return mapping
    except Exception:
        return default

# Build one single-row DataFrame per event and concatenate once at the
# end (the original appended inside the loop, which is quadratic and uses
# the long-deprecated DataFrame.append API).
_rows = []
for event in quakedict['q:quakeml'][u'eventParameters']['event']:
    # Date/position fields are required; a malformed event should fail loudly.
    cdate = event['origin']['time']['value']
    tempdf = pd.DataFrame({'year': [int(cdate[:4])],
                           'month': [int(cdate[5:7])],
                           'date': [cdate],
                           'lat': [event['origin']['latitude']['value']],
                           'lon': [event['origin']['longitude']['value']],
                           'mag_type': [_nested_get(event, ('magnitude', 'type'))],
                           'pref_mag_id': [_nested_get(event, ('preferredMagnitudeID',))],
                           'pref_orig_id': [_nested_get(event, ('preferredOriginID',))],
                           'description': [_nested_get(event, ('description',))],
                           'event_type': [_nested_get(event, ('type',))],
                           'depth': [_nested_get(event, ('origin', 'depth'))],
                           'creation_info': [_nested_get(event, ('origin', 'creationInfo'))],
                           'contributor': [_nested_get(event, ('origin', '@iris:contributor'))],
                           'catalog': [_nested_get(event, ('origin', '@iris:catalog'))],
                           'contributor_origin_id': [_nested_get(event, ('origin', '@iris:contributorOriginId'))],
                           # magnitude defaults to 0 so magnitude-less events
                           # can be filtered out below.
                           'magnitude': [_nested_get(event, ('magnitude', 'mag', 'value'), default=0)]},
                          dtype=float)
    _rows.append(tempdf)
quakes = pd.concat(_rows) if _rows else pd.DataFrame()
# Drop events for which no magnitude was reported.
quakes = quakes[quakes.magnitude != 0]
quakes[['year', 'month']] = quakes[['year', 'month']].astype(int) | {
"repo_name": "Prooffreader/Misc_ipynb",
"path": "Japan_Earthquakes/earthquakes-rest-api.py",
"copies": "1",
"size": "2873",
"license": "mit",
"hash": -7014513477586500000,
"line_mean": 30.2391304348,
"line_max": 126,
"alpha_frac": 0.5081796032,
"autogenerated": false,
"ratio": 3.8002645502645502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48084441534645506,
"avg_score": null,
"num_lines": null
} |
# a python script to generate an import table in ASM
# Ange Albertini, BSD Licence, 2013
# Map of DLL name -> list of API names to place in the generated import
# table. Commented entries are common Win32 APIs kept handy for reuse.
d = {
 "kernel32.dll": [
    "ExitProcess",
#    "FindResourceA",
#    "LoadResource",
#    "GetModuleHandleA",
#    "GetCommandLineA",
    ],
 "msvcrt.dll": [
    "printf"
    ],
 "user32.dll":[
    "MessageBoxA",
#    "LoadIconA",
#    "LoadCursorA",
#
#    "RegisterClassExA",
#    "CreateWindowExA",
#    "ShowWindow",
#    "UpdateWindow",
#
#    "GetMessageA",
#    "TranslateMessage",
#    "DispatchMessageA",
#    "PostQuitMessage",
#    "DefWindowProcA",
    ]
}
################################################################################
t_desc = """istruc IMPORT_IMAGE_DESCRIPTOR
at IMPORT_IMAGE_DESCRIPTOR.DllName, dd sz_%(dllnameclean)-8s - IMAGEBASE
at IMPORT_IMAGE_DESCRIPTOR.IAT, dd iat_%(dllnameclean)-8s - IMAGEBASE
iend"""
t_descs = """%(descs)s
istruc IMPORT_IMAGE_DESCRIPTOR
iend"""
t_hn = """hn_%(apiclean)-16s db 0, 0, '%(api)s', 0"""
t_iatentry = "__imp__%(apiclean)-16s dd hn_%(apiclean)-16s - IMAGEBASE"
t_iat = """iat_%(dllnameclean)s:
%(iat)s
dd 0"""
t_dllnamedec = "sz_%(dllnameclean)-8s db '%(dllname)s', 0"
t_imports = """imports:
%(descs)s
%(iats)s
%(dllnames)s
%(hns)s
"""
################################################################################
# Walk the DLL/API dict and render each template. The templates are
# filled via "% locals()", so the loop variable names (dllname,
# dllnameclean, api, apiclean, iat, ...) must match the %(...)s keys.
descs = []
dllnames = []
iats = []
hns = []
for dllname in d:
    # e.g. "kernel32.dll" -> "kernel32", used to build label names.
    dllnameclean = dllname.lower().replace(".dll", "")
    descs.append(t_desc % locals())
    dllnames.append(t_dllnamedec % locals())
    iat = []
    for api in d[dllname]:
        apiclean = api.lower()
        hns.append(t_hn % locals())
        iat.append(t_iatentry % locals())
    iat = "\r\n".join(iat)
    iat = t_iat % locals()
    iats.append(iat)
# Join each section and stitch the final listing together.
hns = ("\r\n".join(hns)).strip()
descs = ("\r\n".join(descs)).strip()
descs = t_descs % locals()
iats = ("\r\n".join(iats)).strip()
dllnames = ("\r\n".join(dllnames)).strip()
################################################################################
print (t_imports % locals()).strip()
| {
"repo_name": "angea/corkami",
"path": "misc/python/makeimports.py",
"copies": "1",
"size": "2223",
"license": "bsd-2-clause",
"hash": 4594107296771652600,
"line_mean": 21.6489361702,
"line_max": 80,
"alpha_frac": 0.4736842105,
"autogenerated": false,
"ratio": 3.074688796680498,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4048373007180498,
"avg_score": null,
"num_lines": null
} |
import argparse
import os
import subprocess
from LogManager import logger
from CrossReferenceBuilder import CrossReferenceBuilder
from CrossReferenceBuilder import createCrossReferenceLogArgumentParser
from CrossReference import PlatformDependentGenericRoutine
from UtilityFunctions import *
# Do not generate the graph if have more than 30 nodes
# (larger graphs render too densely to be useful in the HTML docs).
MAX_DEPENDENCY_LIST_SIZE = 30
class GraphGenerator:
    def __init__(self, crossReference, outDir, docRepDir, dot):
        """Store the cross-reference model and output settings.

        crossReference: cross-reference object; getAllPackages() is used here.
        outDir: root directory where per-package graph files are written.
        docRepDir: documentation repository directory (kept for callers).
        dot: path to the Graphviz ``dot`` executable.
        """
        self._crossRef = crossReference
        self._allPackages = crossReference.getAllPackages()
        self._outDir = outDir
        self._docRepDir = docRepDir
        self._dot = dot
    def generateGraphs(self):
        """Generate every graph type: package dependency/dependent graphs,
        routine call/caller graphs, and the color legend."""
        self.generatePackageDependenciesGraph()
        self.generatePackageDependentsGraph()
        self.generateRoutineCallGraph()
        self.generateRoutineCallerGraph()
        self.generateColorLegend()
#==========================================================================
#
#==========================================================================
    def generatePackageDependenciesGraph(self, isDependency=True):
        """Generate a dependency (isDependency=True) or dependent graph
        for every package, one image per package."""
        # generate all dot file and use dot to generated the image file format
        if isDependency:
            name = "dependencies"
        else:
            name = "dependents"
        logger.info("Start generating package %s......" % name)
        logger.info("Total Packages: %d" % len(self._allPackages))
        for package in self._allPackages.values():
            self.generatePackageDependencyGraph(package, isDependency)
        logger.info("End of generating package %s......" % name)
#==========================================================================
#
#==========================================================================
    def generatePackageDependentsGraph(self):
        """Convenience wrapper: generate the dependent (reverse) graphs."""
        self.generatePackageDependenciesGraph(False)
#==========================================================================
## Method to generate the package dependency/dependent graph
#==========================================================================
def generatePackageDependencyGraph(self, package, dependencyList=True):
# merge the routine and package list
depPackages, depPackageMerged = mergeAndSortDependencyListByPackage(
package,
dependencyList)
if dependencyList:
packageSuffix = "_dependency"
else:
packageSuffix = "_dependent"
packageName = package.getName()
normalizedName = normalizePackageName(packageName)
totalPackage = len(depPackageMerged)
if (totalPackage == 0) or (totalPackage > MAX_DEPENDENCY_LIST_SIZE):
logger.info("Nothing to do exiting... Package: %s Total: %d " %
(packageName, totalPackage))
return
try:
dirName = os.path.join(self._outDir, packageName)
if not os.path.exists(dirName):
os.makedirs(dirName)
except OSError, e:
logger.error("Error making dir %s : Error: %s" % (dirName, e))
return
output = open(os.path.join(dirName, normalizedName + packageSuffix + ".dot"), 'w')
output.write("digraph %s {\n" % (normalizedName + packageSuffix))
output.write("\tnode [shape=box fontsize=14];\n") # set the node shape to be box
output.write("\tnodesep=0.35;\n") # set the node sep to be 0.35
output.write("\transsep=0.55;\n") # set the rank sep to be 0.75
output.write("\tedge [fontsize=12];\n") # set the edge label and size props
output.write("\t%s [style=filled fillcolor=orange label=\"%s\"];\n" % (normalizedName,
packageName))
for depPackage in depPackages:
depPackageName = depPackage.getName()
normalizedDepPackName = normalizePackageName(depPackageName)
output.write("\t%s [label=\"%s\" URL=\"%s\"];\n" % (normalizedDepPackName,
depPackageName,
getPackageHtmlFileName(depPackageName)))
depMetricsList = depPackageMerged[depPackage]
edgeWeight = sum(depMetricsList[0:7:2])
edgeLinkURL = getPackageDependencyHtmlFile(normalizedName, normalizedDepPackName)
edgeStartNode = normalizedName
edgeEndNode = normalizedDepPackName
edgeLinkArch = packageName
toolTipStartPackage = packageName
toolTipEndPackage = depPackageName
if not dependencyList:
edgeStartNode = normalizedDepPackName
edgeEndNode = normalizedName
edgeLinkArch = depPackageName
toolTipStartPackage = depPackageName
toolTipEndPackage = packageName
(edgeLabel, edgeToolTip, edgeStyle) = getPackageGraphEdgePropsByMetrics(depMetricsList,
toolTipStartPackage,
toolTipEndPackage)
output.write("\t%s->%s [label=\"%s\" weight=%d URL=\"%s#%s\" style=\"%s\" labeltooltip=\"%s\" edgetooltip=\"%s\"];\n" % (edgeStartNode,
edgeEndNode,
edgeLabel,
edgeWeight,
edgeLinkURL,
edgeLinkArch,
edgeStyle,
edgeToolTip,
edgeToolTip))
output.write("}\n")
output.close()
# use dot tools to generated the image and client side mapping
outputName = os.path.join(dirName, normalizedName + packageSuffix + ".png")
outputmap = os.path.join(dirName, normalizedName + packageSuffix + ".cmapx")
inputName = os.path.join(dirName, normalizedName + packageSuffix + ".dot")
# this is to generated the image in gif format and also cmapx (client side map) to make sure link
# embeded in the graph is clickable
command = "\"%s\" -Tpng -o\"%s\" -Tcmapx -o\"%s\" \"%s\"" % (self._dot,
outputName,
outputmap,
inputName)
logger.debug("command is %s" % command)
retCode = subprocess.call(command, shell=True)
if retCode != 0:
logger.error("calling dot with command[%s] returns %d" % (command, retCode))
#===============================================================================
#
#===============================================================================
    def generateRoutineCallGraph(self, isCalled=True):
        """Generate a call graph (isCalled=True) or caller graph for every
        routine in every package; platform-generic routines get one graph
        per platform-specific variant."""
        logger.info("Start Routine generating call graph......")
        for package in self._allPackages.itervalues():
            routines = package.getAllRoutines()
            for routine in routines.itervalues():
                isPlatformGenericRoutine = self._crossRef.isPlatformGenericRoutineByName(routine.getName())
                if isCalled and isPlatformGenericRoutine:
                    self.generatePlatformGenericDependencyGraph(routine, isCalled)
                else:
                    self.generateRoutineDependencyGraph(routine, isCalled)
        logger.info("End of generating call graph......")
#==========================================================================
# Method to generate routine caller graph for platform dependent routines
#==========================================================================
    def generatePlatformGenericDependencyGraph(self, genericRoutine, isDependency):
        """For a platform-generic routine, generate one dependency graph
        per platform-specific implementation (call graphs only)."""
        assert genericRoutine
        assert isinstance(genericRoutine, PlatformDependentGenericRoutine)
        # Caller graphs are handled by the generic routine itself elsewhere.
        if not isDependency:
            return
        platformRoutines = genericRoutine.getAllPlatformDepRoutines()
        for routineInfo in platformRoutines.itervalues():
            # routineInfo[0] is the platform-specific routine object.
            self.generateRoutineDependencyGraph(routineInfo[0], isDependency)
#==========================================================================
#
#==========================================================================
def generateRoutineCallerGraph(self):
    """Generate caller graphs; same as call graphs with direction reversed."""
    self.generateRoutineCallGraph(False)
#==========================================================================
## generate all dot file and use dot to generated the image file format
#==========================================================================
def generateRoutineDependencyGraph(self, routine, isDependency=True):
if not routine.getPackage():
return
routineName = routine.getName()
packageName = routine.getPackage().getName()
if isDependency:
depRoutines = routine.getCalledRoutines()
routineSuffix = "_called"
totalDep = routine.getTotalCalled()
else:
depRoutines = routine.getCallerRoutines()
routineSuffix = "_caller"
totalDep = routine.getTotalCaller()
# do not generate graph if no dep routines or
# totalDep routines > max_dependency_list
if (not depRoutines
or len(depRoutines) == 0
or totalDep > MAX_DEPENDENCY_LIST_SIZE):
logger.debug("No called Routines found! for routine:%s package:%s" % (routineName, packageName))
return
try:
dirName = os.path.join(self._outDir, packageName)
if not os.path.exists(dirName):
os.makedirs(dirName)
except OSError, e:
logger.error("Error making dir %s : Error: %s" % (dirName, e))
return
output = open(os.path.join(dirName, routineName + routineSuffix + ".dot"), 'wb')
output.write("digraph \"%s\" {\n" % (routineName + routineSuffix))
output.write("\tnode [shape=box fontsize=14];\n") # set the node shape to be box
output.write("\tnodesep=0.45;\n") # set the node sep to be 0.15
output.write("\transsep=0.45;\n") # set the rank sep to be 0.75
# output.write("\tedge [fontsize=12];\n") # set the edge label and size props
if routine.getPackage() not in depRoutines:
output.write("\tsubgraph \"cluster_%s\"{\n" % (routine.getPackage()))
output.write("\t\t\"%s\" [style=filled fillcolor=orange];\n" % routineName)
output.write("\t}\n")
for (package, callDict) in depRoutines.iteritems():
output.write("\tsubgraph \"cluster_%s\"{\n" % (package))
for routine in callDict.keys():
output.write("\t\t\"%s\" [penwidth=2 %s URL=\"%s\" tooltip=\"%s\"];\n" % (routine,
findDotColor(routine),
getPackageObjHtmlFileName(routine),
getPackageObjHtmlFileName(routine)
))
if str(package) == packageName:
output.write("\t\t\"%s\" [style=filled fillcolor=orange];\n" % routineName)
output.write("\t\tlabel=\"%s\";\n" % package)
output.write("\t}\n")
for (routine, tags) in callDict.iteritems():
if isDependency:
output.write("\t\t\"%s\"->\"%s\"" % (routineName, routine))
else:
output.write("\t\t\"%s\"->\"%s\"" % (routine, routineName))
output.write(";\n")
output.write("}\n")
output.close()
outputName = os.path.join(dirName, routineName + routineSuffix + ".png")
outputmap = os.path.join(dirName, routineName + routineSuffix + ".cmapx")
inputName = os.path.join(dirName, routineName + routineSuffix + ".dot")
# this is to generated the image in png format and also cmapx (client side map) to make sure link
# embeded in the graph is clickable
# @TODO this should be able to run in parallel
command = "\"%s\" -Tpng -o\"%s\" -Tcmapx -o\"%s\" \"%s\"" % (self._dot,
outputName,
outputmap,
inputName)
logger.debug("command is %s" % command)
retCode = subprocess.call(command, shell=True)
if retCode != 0:
logger.error("calling dot with command[%s] returns %d" % (command, retCode))
#==========================================================================
# Generate Color legend image
#==========================================================================
def generateColorLegend(self, isCalled=True):
    """Render the caller-graph color legend (png + cmapx) via dot.

    Note: the isCalled argument is currently unused.
    """
    command = "\"%s\" -Tpng -o\"%s\" -Tcmapx -o\"%s\" \"%s\"" % (self._dot,
              os.path.join(self._outDir,"colorLegend.png"),
              os.path.join(self._outDir,"colorLegend.cmapx"),
              os.path.join(self._docRepDir,'callerGraph_color_legend.dot'))
    logger.debug("command is %s" % command)
    retCode = subprocess.call(command, shell=True)
    if retCode != 0:
        logger.error("calling dot with command[%s] returns %d" % (command, retCode))
#===============================================================================
# main
#===============================================================================
def run(args):
    """Build the cross reference from parsed args and generate all graphs."""
    logger.info ("Parsing ICR JSON file....")
    icrJsonFile = os.path.abspath(args.icrJsonFile)
    parsedICRJSON = parseICRJson(icrJsonFile)
    logger.info ("Building cross reference....")
    doxDir = os.path.join(args.patchRepositDir, 'Utilities/Dox')
    crossRef = CrossReferenceBuilder().buildCrossReferenceWithArgs(args, pkgDepJson=None, icrJson=parsedICRJSON,
        inputTemplateDeps=readIntoDictionary(args.inputTemplateDep),
        sortTemplateDeps=readIntoDictionary(args.sortTemplateDep),
        printTemplateDeps=readIntoDictionary(args.printTemplateDep)
        )
    logger.info ("Starting generating graphs....")
    graphGenerator = GraphGenerator(crossRef,
                                    args.outdir,
                                    doxDir,
                                    args.dot)
    graphGenerator.generateGraphs()
    logger.info ("End of generating graphs")
if __name__ == '__main__':
    # Command-line entry point: extend the shared cross-reference argument
    # parser with graph-generator specific options.
    crossRefArgParse = createCrossReferenceLogArgumentParser()
    parser = argparse.ArgumentParser(
        description='VistA Visual Cross-Reference Graph Generator',
        parents=[crossRefArgParse])
    parser.add_argument('-o', '--outdir', required=True,
                        help='Output Web Page directory')
    parser.add_argument('-dot', required=True,
                        help='path to the folder containing dot excecutable')
    parser.add_argument('-lf', '--outputLogFileName', required=False,
                        help='the output Logging file')
    parser.add_argument('-icr','--icrJsonFile', required=True,
                        help='JSON formatted information of DBIA/ICR')
    parser.add_argument('-st','--sortTemplateDep', required=True,
                        help='CSV formatted "Relational Jump" field data for Sort Templates')
    parser.add_argument('-it','--inputTemplateDep', required=True,
                        help='CSV formatted "Relational Jump" field data for Input Templates')
    parser.add_argument('-pt','--printTemplateDep', required=True,
                        help='CSV formatted "Relational Jump" field data for Print Templates')
    result = parser.parse_args();
    # Fall back to a temp log file when none was specified.
    if not result.outputLogFileName:
        outputLogFile = getTempLogFile("GraphGen.log")
    else:
        outputLogFile = result.outputLogFileName
    initLogging(outputLogFile)
    logger.debug (result)
    run(result)
| {
"repo_name": "OSEHRA-Sandbox/VistA",
"path": "Utilities/Dox/PythonScripts/GraphGenerator.py",
"copies": "1",
"size": "17601",
"license": "apache-2.0",
"hash": 7852159826159722000,
"line_mean": 53.3240740741,
"line_max": 147,
"alpha_frac": 0.5175274132,
"autogenerated": false,
"ratio": 5.046158256880734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6063685670080734,
"avg_score": null,
"num_lines": null
} |
from builtins import str
from builtins import object
from future.utils import iteritems
from future.utils import itervalues
import argparse
import os
import re
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from LogManager import logger, initLogging
from CrossReferenceBuilder import CrossReferenceBuilder
from CrossReferenceBuilder import createCrossReferenceLogArgumentParser
from UtilityFunctions import getPackageHtmlFileName, getPackageDependencyHtmlFileName
from UtilityFunctions import getPackageComponentLink, getPackageGraphEdgePropsByMetrics
from UtilityFunctions import mergeAndSortDependencyListByPackage, normalizePackageName
from UtilityFunctions import normalizeName, parseICRJson, readIntoDictionary
from UtilityFunctions import PACKAGE_COMPONENT_MAP, MAX_DEPENDENCY_LIST_SIZE, COLOR_MAP
class GraphGenerator(object):
    """Generate package and routine dependency graphs with graphviz dot.

    Every graph is written as a .dot file and rendered to .png plus .cmapx
    (client-side image map) so the nodes in the generated web pages are
    clickable.
    """
    def __init__(self, crossReference, outDir, docRepDir, dot):
        """
        Args:
            crossReference: Cross reference object providing all packages.
            outDir: Root output directory; one sub-directory per package.
            docRepDir: Dox documentation repository directory (source of the
                color-legend .dot file).
            dot: Path to the graphviz dot executable.
        """
        self._crossRef = crossReference
        self._allPackages = crossReference.getAllPackages()
        self._outDir = outDir
        self._docRepDir = docRepDir
        self._dot = dot
        # Direction flag read by the thread-pool workers: True while
        # generating dependency/called graphs, False for dependents/callers.
        self._isDependency = False
        # Check for package directories once
        # TODO: Should delete empty directories after graphs are generated?
        for package in itervalues(self._allPackages):
            try:
                packageName = package.getName()
                dirName = os.path.join(self._outDir, packageName)
                if not os.path.exists(dirName):
                    os.makedirs(dirName)
            except OSError as e:
                logger.error("Error making dir %s : Error: %s" % (dirName, e))

    def generateGraphs(self):
        """Generate all package/routine graphs plus the color legend."""
        logger.progress("Generate Package Dependencies graphs")
        self.generatePackageDependenciesGraph()
        logger.progress("Generate Package Dependents graphs")
        self.generatePackageDependentsGraph()
        logger.progress("Generate Routine Call graphs")
        self.generateRoutineCallGraph()
        logger.progress("Generate Routine Caller graphs")
        self.generateRoutineCallerGraph()
        logger.progress("Generate Color Legend")
        self.generateColorLegend()

    #==========================================================================
    ## Methods to generate the package dependency/dependent graphs
    #==========================================================================
    def generatePackageDependenciesGraph(self, isDependency=True):
        """Generate one dependency (or dependent) graph per package.

        Args:
            isDependency: True for dependency graphs, False for dependents.
        """
        # generate all dot file and use dot to generated the image file format
        self._isDependency = isDependency
        if self._isDependency:
            name = "dependencies"
        else:
            name = "dependents"
        logger.info("Start generating package %s......" % name)
        logger.info("Total Packages: %d" % len(self._allPackages))
        # Make the Pool of workers
        pool = ThreadPool(4)
        # Create graphs in their own threads
        pool.map(self._generatePackageDependencyGraph, itervalues(self._allPackages))
        # close the pool and wait for the work to finish
        pool.close()
        pool.join()
        logger.info("End of generating package %s......" % name)

    def generatePackageDependentsGraph(self):
        """Generate dependent graphs; dependency graphs with direction flipped."""
        self.generatePackageDependenciesGraph(False)

    def _generatePackageDependencyGraph(self, package):
        """Write and render the dependency/dependent graph for one package."""
        # merge the routine and package list
        depPackages, depPackageMerged = mergeAndSortDependencyListByPackage(package, self._isDependency)
        packageName = package.getName()
        totalPackage = len(depPackageMerged)
        # Skip empty graphs and graphs too large to be readable.
        if (totalPackage == 0) or (totalPackage > MAX_DEPENDENCY_LIST_SIZE):
            logger.info("Nothing to do exiting... Package: %s Total: %d " %
                        (packageName, totalPackage))
            return
        dirName = os.path.join(self._outDir, packageName)
        if self._isDependency:
            packageSuffix = "_dependency"
        else:
            packageSuffix = "_dependent"
        normalizedName = normalizePackageName(packageName)
        dotFilename = os.path.join(dirName, "%s%s.dot" % (normalizedName, packageSuffix))
        with open(dotFilename, 'w') as output:
            output.write("digraph %s%s {\n" % (normalizedName, packageSuffix))
            output.write("\tnode [shape=box fontsize=14];\n")  # set the node shape to be box
            output.write("\tnodesep=0.35;\n")  # set the node sep to be 0.35
            # BUG FIX: was "\transsep", which emitted the invalid attribute
            # "ranssep"; graphviz expects "ranksep".
            output.write("\tranksep=0.55;\n")  # set the rank sep to be 0.55
            output.write("\tedge [fontsize=12];\n")  # set the edge label and size props
            # Highlight the current package in orange.
            output.write("\t%s [style=filled fillcolor=orange label=\"%s\"];\n" % (normalizedName,
                                                                                   packageName))
            for depPackage in depPackages:
                depPackageName = depPackage.getName()
                normalizedDepPackName = normalizePackageName(depPackageName)
                output.write("\t%s [label=\"%s\" URL=\"%s\"];\n" % (normalizedDepPackName,
                                                                    depPackageName,
                                                                    getPackageHtmlFileName(depPackageName)))
                depMetricsList = depPackageMerged[depPackage]
                # Edge weight is the sum of the even-indexed metrics
                # (indices 0, 2, 4, 6).
                edgeWeight = sum(depMetricsList[0:7:2])
                edgeLinkURL = getPackageDependencyHtmlFileName(normalizedName, normalizedDepPackName)
                if self._isDependency:
                    edgeStartNode = normalizedName
                    edgeEndNode = normalizedDepPackName
                    edgeLinkArch = packageName
                    toolTipStartPackage = packageName
                    toolTipEndPackage = depPackageName
                else:
                    edgeStartNode = normalizedDepPackName
                    edgeEndNode = normalizedName
                    edgeLinkArch = depPackageName
                    toolTipStartPackage = depPackageName
                    toolTipEndPackage = packageName
                (edgeLabel, edgeToolTip, edgeStyle) = getPackageGraphEdgePropsByMetrics(depMetricsList,
                                                                                       toolTipStartPackage,
                                                                                       toolTipEndPackage)
                output.write("\t%s->%s [label=\"%s\" weight=%d URL=\"%s#%s\" style=\"%s\" labeltooltip=\"%s\" edgetooltip=\"%s\"];\n" %
                             (edgeStartNode, edgeEndNode, edgeLabel,
                              edgeWeight, edgeLinkURL, edgeLinkArch,
                              edgeStyle, edgeToolTip, edgeToolTip))
            output.write("}\n")
        pngFilename = os.path.join(dirName, "%s%s.png" % (normalizedName, packageSuffix))
        cmapxFilename = os.path.join(dirName, "%s%s.cmapx" % (normalizedName, packageSuffix))
        self._generateImagesFromDotFile(pngFilename, cmapxFilename, dotFilename)

    #===============================================================================
    #
    #===============================================================================
    def generateRoutineCallGraph(self, isCalled=True):
        """Generate a call (or caller) graph for every routine and component.

        Args:
            isCalled: True to graph called routines, False to graph callers.
        """
        logger.info("Start Routine generating call graph......")
        self._isDependency = isCalled
        # Make a list of all routines we want to process
        allRoutines = []
        for package in itervalues(self._allPackages):
            for routine in itervalues(package.getAllRoutines()):
                isPlatformGenericRoutine = self._crossRef.isPlatformGenericRoutineByName(routine.getName())
                if self._isDependency and isPlatformGenericRoutine:
                    # Platform-generic routines expand to one graph per
                    # platform-specific implementation.
                    platformRoutines = routine.getAllPlatformDepRoutines()
                    for routineInfo in itervalues(platformRoutines):
                        allRoutines.append(routineInfo[0])
                else:
                    allRoutines.append(routine)
        # Add other package components too
        # TODO: This logic is copied from
        # WebPageGenerator::generatePackageInformationPages(),
        # could be improved in both places
        for keyVal in PACKAGE_COMPONENT_MAP:
            for package in itervalues(self._allPackages):
                allRoutines.extend(itervalues(package.getAllPackageComponents(keyVal)))
        # Make the Pool of workers
        pool = ThreadPool(4)
        # Create graphs in their own threads
        pool.map(self._generateRoutineDependencyGraph, allRoutines)
        # close the pool and wait for the work to finish
        pool.close()
        pool.join()
        logger.info("End of generating call graph......")

    def generateRoutineCallerGraph(self):
        """Generate caller graphs; call graphs with direction reversed."""
        self.generateRoutineCallGraph(False)

    def _generateRoutineDependencyGraph(self, routine):
        """Write and render the called/caller graph for one routine."""
        package = routine.getPackage()
        if not package:
            return
        routineName = routine.getName()
        packageName = package.getName()
        if self._isDependency:
            depRoutines = routine.getCalledRoutines()
        else:
            depRoutines = routine.getCallerRoutines()
        # do not generate graph if no dep routines or
        # total dep routines > max_dependency_list
        if not depRoutines:
            logger.debug("No called Routines found! for routine:%s package:%s" %
                         (routineName, packageName))
            return
        # Count total number of routines
        totalRoutines = 0
        for callDict in itervalues(depRoutines):
            totalRoutines += len(callDict)
        if totalRoutines > MAX_DEPENDENCY_LIST_SIZE:
            logger.debug("Skipping... Found %d dep routines for routine:%s package:%s (max allowed %d)" %
                         (totalRoutines, routineName, packageName, MAX_DEPENDENCY_LIST_SIZE))
            return
        dirName = os.path.join(self._outDir, packageName)
        normalizedName = normalizeName(routineName)
        if self._isDependency:
            routineSuffix = "called"
        else:
            routineSuffix = "caller"
        routineType = routine.getObjectType()
        fileNameRoot = os.path.join(dirName, "%s_%s_%s" % (routineType, normalizedName, routineSuffix))
        dotFilename = "%s.dot" % fileNameRoot
        pngFilename = "%s.png" % fileNameRoot
        cmapxFilename = "%s.cmapx" % fileNameRoot
        with open(dotFilename, 'w') as output:
            escapedName = re.escape(routineName)
            nodeName = "root"
            output.write("digraph \"%s\" {\n" % fileNameRoot)
            output.write("\tnode [shape=box fontsize=14];\n")  # set the node shape to be box
            output.write("\tnodesep=0.45;\n")  # set the node sep to be 0.45
            # BUG FIX: was "\transsep", which emitted the invalid attribute
            # "ranssep"; graphviz expects "ranksep".
            output.write("\tranksep=0.45;\n")  # set the rank sep to be 0.45
            # output.write("\tedge [fontsize=12];\n") # set the edge label and size props
            if package not in depRoutines:
                output.write("\tsubgraph \"cluster_%s\"{\n" % package)
                output.write("\t\t\"%s\" [label=\"%s\" style=filled fillcolor=orange];\n" % (nodeName, escapedName))
                output.write("\t}\n")
            for (depPackage, callDict) in iteritems(depRoutines):
                output.write("\tsubgraph \"cluster_%s\"{\n" % depPackage)
                for depRoutine in callDict:
                    escapedDepRoutineName = re.escape(depRoutine.getName())
                    htmlFileName = getPackageComponentLink(depRoutine)
                    output.write("\t\t\"%s\" [penwidth=2 color=\"%s\" URL=\"%s\" tooltip=\"%s\"];\n" %
                                 (escapedDepRoutineName, COLOR_MAP[depRoutine.getObjectType()],
                                  htmlFileName, htmlFileName))
                if str(depPackage) == packageName:
                    output.write("\t\t\"%s\" [label=\"%s\" style=filled fillcolor=orange];\n" % (nodeName, escapedName))
                output.write("\t\tlabel=\"%s\";\n" % depPackage)
                output.write("\t}\n")
                for depRoutine in callDict:
                    escapedDepRoutineName = re.escape(depRoutine.getName())
                    if self._isDependency:
                        output.write("\t\t\"%s\"->\"%s\"" % (nodeName, escapedDepRoutineName))
                    else:
                        output.write("\t\t\"%s\"->\"%s\"" % (escapedDepRoutineName, nodeName))
                    output.write(";\n")
            output.write("}\n")
        self._generateImagesFromDotFile(pngFilename, cmapxFilename, dotFilename)

    #==========================================================================
    # Generate Color legend image
    #==========================================================================
    def generateColorLegend(self):
        """Render the color legend graph shipped with the doc repository."""
        self._generateImagesFromDotFile(os.path.join(self._outDir, "colorLegend.png"),
                                        os.path.join(self._outDir, "colorLegend.cmapx"),
                                        os.path.join(self._docRepDir, 'callerGraph_color_legend.dot'))

    #==========================================================================
    def _generateImagesFromDotFile(self, pngFilename, cmapxFilename, dotFilename):
        """Run dot to render a .dot file to png and cmapx output."""
        # Generate the image in png format and also cmapx (client side map) to
        # make sure link embeded in the graph is clickable
        # @TODO this should be able to run in parallel
        command = "\"%s\" -Tpng -o\"%s\" -Tcmapx -o\"%s\" \"%s\"" % (self._dot,
                                                                     pngFilename,
                                                                     cmapxFilename,
                                                                     dotFilename)
        retCode = subprocess.call(command, shell=True)
        if retCode != 0:
            logger.error("calling dot with command[%s] returns %d" % (command, retCode))
#===============================================================================
# main
#===============================================================================
def run(args):
    """Build the cross reference from parsed args and generate all graphs."""
    logger.progress("Parsing ICR JSON file....")
    icrJsonFile = os.path.abspath(args.icrJsonFile)
    parsedICRJSON = parseICRJson(icrJsonFile)
    doxDir = os.path.join(args.patchRepositDir, 'Utilities/Dox')
    crossRef = CrossReferenceBuilder().buildCrossReferenceWithArgs(args,
        icrJson=parsedICRJSON,
        inputTemplateDeps=readIntoDictionary(args.inputTemplateDep),
        sortTemplateDeps=readIntoDictionary(args.sortTemplateDep),
        printTemplateDeps=readIntoDictionary(args.printTemplateDep)
        )
    logger.progress("Starting generating graphs....")
    # NOTE(review): args.outDir and args.patchRepositDir are not added by the
    # local argument parser — presumably supplied by the shared
    # cross-reference parser; confirm.
    graphGenerator = GraphGenerator(crossRef, args.outDir, doxDir, args.dot)
    graphGenerator.generateGraphs()
if __name__ == '__main__':
    # Command-line entry point: extend the shared cross-reference argument
    # parser with graph-generator specific options.
    crossRefArgParse = createCrossReferenceLogArgumentParser()
    parser = argparse.ArgumentParser(
        description='VistA Visual Cross-Reference Graph Generator',
        parents=[crossRefArgParse])
    parser.add_argument('-dot', required=True,
                        help='path to the folder containing dot excecutable')
    parser.add_argument('-icr', '--icrJsonFile', required=True,
                        help='JSON formatted information of DBIA/ICR')
    parser.add_argument('-st', '--sortTemplateDep', required=True,
                        help='CSV formatted "Relational Jump" field data for Sort Templates')
    parser.add_argument('-it', '--inputTemplateDep', required=True,
                        help='CSV formatted "Relational Jump" field data for Input Templates')
    parser.add_argument('-pt', '--printTemplateDep', required=True,
                        help='CSV formatted "Relational Jump" field data for Print Templates')
    result = parser.parse_args()
    # NOTE(review): result.logFileDir presumably comes from the shared
    # cross-reference parser; confirm.
    initLogging(result.logFileDir, "GraphGen.log")
    logger.debug(result)
    run(result)
| {
"repo_name": "OSEHRA/VistA",
"path": "Utilities/Dox/PythonScripts/GraphGenerator.py",
"copies": "1",
"size": "17199",
"license": "apache-2.0",
"hash": -2822502436672369000,
"line_mean": 50.9607250755,
"line_max": 135,
"alpha_frac": 0.5685214257,
"autogenerated": false,
"ratio": 4.817647058823529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029335202808853517,
"num_lines": 331
} |
"""A python script to run Tekton pipelines to update Kubeflow manifests."""
# TODO(jlewi): We might want to emit structured logs and then sync to
# BigQuery to support easy monitoring.
# TODO(jlewi): A lot of the git management code has been refactored into
# the module git_repo_manager to make it reusable. We should refactor the
# code to use that.
import fire
import collections
import logging
import os
import subprocess
import traceback
import re
import time
import yaml
from kubeflow.testing import kf_logging
from kubeflow.testing import util
from kubeflow.testing import yaml_util
from kubeflow.testing.cd import close_old_prs
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
# Matches git URLs of the form "<host>:<owner>/<repo>.git".
GIT_URL_RE = re.compile(r"([^:]*):([^/]*)/([^\.]*)\.git")
GIT_TUPLE = collections.namedtuple("git_tuple", ("host", "owner", "repo"))
# (application name, version name) pair; used to report failures.
APP_VERSION_TUPLE = collections.namedtuple("app_version", ("app", "version"))
# Parsed image reference: repository name, tag, and digest.
IMAGE_TUPLE = collections.namedtuple("image", ("name", "tag", "digest"))
# Open-PR descriptor parsed from "hub pr list" output.
PR_INFO = collections.namedtuple("pr_info", ("url", "author", "branch"))
MANIFESTS_REPO_NAME = "manifests"
# The name of the GitHub user under which kubeflow-bot branches exist
KUBEFLOW_BOT = "kubeflow-bot"
# The name of the git resource in the Tekton pipeline that will be the
# git resource containing the application source code.
APP_REPO_RESOURCE_NAME = "app-repo"
def _combine_params(left, right):
"""Combine to lists of name,value pairs."""
d = {}
for p in left:
d[p["name"]] = p
for p in right:
d[p["name"]] = p
result = []
for _, v in d.items():
result.append(v)
return result
def _get_repo_url(repo_spec):
    """Get a tuple representing the resource URL.

    Args:
      repo_spec: Dictionary for the repo spec

    Returns:
      tuple representing the git url

    Raises:
      ValueError: if the repo spec has no "url" param.
    """
    url_param = _get_param(repo_spec["resourceSpec"]["params"], "url")
    if url_param is None:
        raise ValueError(f"Repository {repo_spec['name']} is missing param url")
    return _parse_git_url(url_param["value"])
def _sync_repos(repos, src_dir):
    """Make sure all the repositories are checked out to src dir and up to date.

    Args:
        repos: List of Tekton git resource specs (dicts with a "url" param).
        src_dir: Root directory for local checkouts; each repo lands at
            <src_dir>/<owner>/<repo>.
    """
    if not os.path.exists(src_dir):
        os.makedirs(src_dir)
    for r in repos:
        repo = _get_repo_url(r)
        # Convert the repo to https so we don't need ssh keys; this is a bit
        # of a hack.
        url = f"https://github.com/{repo.owner}/{repo.repo}.git"
        logging.info(f"Sync mapped repo {r} to {url}")
        repo_dir = os.path.join(src_dir, repo.owner, repo.repo)
        # Clone only if the checkout does not exist yet.
        if not os.path.exists(repo_dir):
            if not os.path.exists(os.path.join(src_dir, repo.owner)):
                os.makedirs(os.path.join(src_dir, repo.owner))
            logging.info(f"Clone {url}")
            util.run(["git", "clone", url, repo.repo],
                     cwd=os.path.join(src_dir, repo.owner))
        logging.info(f"Sync repo {repo}")
        # Fetch only; callers check out the branches/commits they need.
        util.run(["git", "fetch", "origin"],
                 cwd=os.path.join(src_dir, repo.owner, repo.repo))
# TODO(jlewi): This is now defined in git_repo_manager
def _parse_git_url(url):
    """Split a git URL like "git@github.com:owner/repo.git" into a GIT_TUPLE.

    Returns an all-empty GIT_TUPLE when the URL does not match.
    """
    match = GIT_URL_RE.match(url)
    if match:
        return GIT_TUPLE(*match.groups())
    return GIT_TUPLE("", "", "")
def _last_commit(branch, repo_root, path):
    """Get the last commit of a change to the source.

    Args:
        branch: The branch e.g. origin/master
        repo_root: Local git checkout in which to run the commands.
        path: The relative path; if none or empty run in the root of the repo

    Returns:
        Short hash of the most recent commit touching path on branch.
    """
    util.run(["git", "checkout", branch], cwd=repo_root)
    command = ["git", "log", "-n", "1", "--pretty=format:\"%h\""]
    if path:
        command.append(path)
    output = util.run(command, cwd=repo_root)
    # The pretty format wraps the hash in literal quotes; strip them.
    return output.strip('"')
def _get_param(params, name):
for p in params:
if p["name"] == name:
return p
return None
def _param_index(params, name):
"""Return the index of the specified item"""
for i, p in enumerate(params):
if p["name"] == name:
return i
raise LookupError(f"Missing item with name={name}")
def _build_run(app_run, app, version, commit):
    """Create a PipelineRun spec.

    Args:
      app_run: A pipelinerun to use as the template; modified in place.
      app: The application configuration
      version: The version specification; modified in place.
      commit: The specific commit at which to build the source image.

    Returns:
      The modified app_run dict.
    """
    app_run["metadata"]["generateName"] = f"cd-{app['name']}-{commit}"
    # Override the params
    app_run["spec"]["params"] = _combine_params(app_run["spec"]["params"],
                                                app["params"])
    # Override the repositories
    # Before we override the repositories we need to change the name of the
    # repo containing the source to be the name of the parameter
    # APP_REPO_RESOURCE_NAME
    source_index = _param_index(version["repos"], app["sourceRepo"])
    version["repos"][source_index]["name"] = APP_REPO_RESOURCE_NAME
    # Tekton will give us an error if we include extra resources; i.e. resources
    # not defined in the Pipeline spec. So we need to remove from version all
    # the repos we don't need
    expected_repos = [APP_REPO_RESOURCE_NAME, "manifests", "ci-tools"]
    new_repos = []
    for v in version["repos"]:
        if v["name"] in expected_repos:
            new_repos.append(v)
    version["repos"] = new_repos
    app_run["spec"]["resources"] = _combine_params(app_run["spec"]["resources"],
                                                   version["repos"])
    # Override the commit for the src repo to pin to a specific commit
    source_index = _param_index(app_run["spec"]["resources"],
                                APP_REPO_RESOURCE_NAME)
    source_params = app_run["spec"]["resources"][source_index]["resourceSpec"]["params"]
    updated_params = _combine_params(source_params, [{
        "name": "revision",
        "value": commit,}])
    app_run["spec"]["resources"][source_index]["resourceSpec"]["params"] = updated_params
    # Override the image resource
    # Image tags look like "<version tag>-g<commit>".
    tag = f"{version['tag']}-g{commit}"
    image_resource = _get_param(app_run["spec"]["resources"], "image")
    src_image_url = _get_param(app["params"], "src_image_url")["value"]
    if not src_image_url:
        raise ValueError(f"App {app['name']} is missing parameter "
                         f"src_image_url")
    image_resource["resourceSpec"]["params"] = _combine_params(
        image_resource["resourceSpec"]["params"],
        [{
            "name": "url",
            "value": f"{src_image_url}:{tag}",
        }]
    )
    app_run["spec"]["resources"] = _combine_params(app_run["spec"]["resources"],
                                                   [image_resource])
    # Add some helpful labels
    # This will be used to see if we already have a pipeline running for
    # a specific application and version
    if not "labels" in app_run["metadata"]:
        app_run["metadata"]["labels"] = {}
    app_run["metadata"]["labels"]["app"] = app["name"]
    app_run["metadata"]["labels"]["version"] = version["name"]
    app_run["metadata"]["labels"]["image_tag"] = tag
    return app_run
def _deep_copy(data):
    """Return a deep copy of data via a YAML round trip.

    Serializing and re-parsing yields a fully independent copy of the
    nested dict/list structure parsed from the config files.
    """
    value = yaml.dump(data)
    # safe_load avoids PyYAML's "no Loader specified" deprecation warning on
    # the bare yaml.load call and never constructs arbitrary Python objects.
    return yaml.safe_load(value)
def _get_image(config, image_name):
"""Get the tag for an image
Args:
config: kustomization config
image_name: Name of the image
"""
for i in config.get("images"):
if i["name"] == image_name:
image_name = i.get("newName", image_name)
new_tag = i.get("newTag", "")
if not new_tag:
if ":" in image_name:
image_name, new_tag = image_name.split(":", 1)
new_digest = i.get("digest", "")
return IMAGE_TUPLE(image_name, new_tag, new_digest)
raise LookupError(f"Could not find image: {image_name}")
def _open_prs(repo_dir):
    """Return open PRs for the given repository.

    Args:
      repo_dir: The directory for the repo to list open PRs for.

    Returns:
      prs: [PR_INFO]; list of PR_INFO objects describing open PRs
    """
    # See hub conventions:
    # https://hub.github.com/hub.1.html
    # The GitHub repository is determined automatically based on the name
    # of remote repositories
    #
    # Don't use util.run because we don't want to echo output of hub pr
    output = subprocess.check_output(["hub", "pr", "list", "--format=%U;%H;%t\n"],
                                     cwd=repo_dir).decode()
    lines = output.splitlines()
    prs = []
    for l in lines:
        # Each line is "url;head;title".
        pieces = l.split(";")
        # Title could potentially have semi colons in it so we could wind up
        # with more than 3 pieces.
        if len(pieces) < 3:
            logging.error(f"Line {l} doesn't appear to match expected format of "
                          f"url;head;title")
            continue
        url = pieces[0]
        head = pieces[1]
        # The head reference of the PR is in the format {author}:{branch}
        head_pieces = head.split(":")
        if len(head_pieces) != 2:
            logging.error(f"Head={head} doesn't appear to be in form $author:$branch")
            continue
        author = head_pieces[0]
        branch = head_pieces[1]
        prs.append(PR_INFO(url, author, branch))
    return prs
def _handle_app(run, app, version, src_dir, output_dir):
    """Create the PipelineRun for the specified application.

    Args:
      run: PipelineRun template dict; modified in place.
      app: The application configuration.
      version: The version specification.
      src_dir: Directory where source repositories are checked out.
      output_dir: Directory to write the PipelineRun YAML into.

    Returns:
      run_file: The file containing the PipelineRun
      needs_update: (boolean) True if the kustomize manifest is outdated and
        we need to run the pipeline to update it
    """
    app_version = AppVersion(app, version)
    # Get the last change to the source
    src_path = _get_param(app["params"], "path_to_context")["value"]
    if not src_path:
        # We want to allow it to be the root of the repository
        src_path = ""
        logging.info("src_path not specified or is root of repository")
    repo_root = os.path.join(src_dir, app_version.repo.owner,
                             app_version.repo.repo)
    src_branch = _get_param(app_version.source_repo["resourceSpec"]["params"],
                            "revision")["value"]
    branch_spec = f"origin/{src_branch}"
    commit = _last_commit(branch_spec, repo_root, src_path)
    # Copy version so _build_run's in-place edits don't leak to the caller.
    run = _build_run(run, app, _deep_copy(version), commit)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_file = os.path.join(output_dir,
                               f"{app['name']}-run-{src_branch}-"
                               f"{commit}.yaml")
    logging.info(f"Writing run for App: {app['name']} Version: "
                 f"{version['name']} to file {output_file}",
                 extra=run["metadata"]["labels"])
    with open(output_file, "w") as hf:
        yaml.dump(run, hf)
    manifests_branch = _get_param(app_version.manifests_repo["resourceSpec"]
                                  ["params"], "revision")["value"]
    manifests_repo = _get_repo_url(app_version.manifests_repo)
    manifests_dir = os.path.join(src_dir, manifests_repo.owner,
                                 manifests_repo.repo)
    util.run(["git", "checkout", f"origin/{manifests_branch}"],
             cwd=manifests_dir)
    # Determine whether there is already a PR open to update the image.
    #
    # We do this by looking to see if there is a PR open for the branch that
    # a PR would create.
    image_tag = run["metadata"]["labels"]["image_tag"]
    pr_branch = _branch_for_app(app, image_tag)
    open_prs = _open_prs(manifests_dir)
    logging.info(f"Checking if there is a PR from {KUBEFLOW_BOT} "
                 f"for branch {pr_branch}")
    for pr in open_prs:
        if pr.branch == pr_branch:
            logging.info(f"For App: {app['name']} Tag: {image_tag} found "
                         f"open PR {pr.url}")
            return output_file, False
    # Determine whether the application/version actually needs to be run or not
    # We do this by loading the kustomize file and seeing if it is up to date
    path_to_manifests_dir = _get_param(app["params"],
                                       "path_to_manifests_dir")["value"]
    kustomization_path = os.path.join(manifests_dir,
                                      path_to_manifests_dir,
                                      "kustomization.yaml")
    with open(kustomization_path) as hf:
        kustomize_config = yaml.load(hf)
    src_image_url = _get_param(app["params"], "src_image_url")["value"]
    current_image = _get_image(kustomize_config, src_image_url)
    logging.info(f"App: {app['name']} Version: {version['name']} "
                 f"Current Image Tag: {current_image.tag}")
    needs_update = False
    if current_image.tag != run["metadata"]["labels"]["image_tag"]:
        needs_update = True
        # NOTE(review): this logs the *current* tag; the target is
        # run["metadata"]["labels"]["image_tag"] — confirm intent.
        logging.info(f"App: {app['name']} Version: {version['name']} "
                     f"needs to be updated to tag {current_image.tag}")
    return output_file, needs_update
def _branch_for_app(app, image_tag):
    """Return the branch that will be used to update the specified application.

    Args:
      app: Dictionary describing the app.
      image_tag: The tag for image
    """
    # This logic needs to match what the script creating the PR is doing.
    # e.g.
    # https://github.com/kubeflow/testing/blob/909454ab283d6ee67107a1c1607ca4ec9542bfeb/py/kubeflow/testing/ci/rebuild-manifests.sh#L47
    src_image_url = _get_param(app["params"], "src_image_url")["value"]
    # The final URL path component is the image name.
    image_name = src_image_url.rsplit("/", 1)[-1]
    return f"update_{image_name}_{image_tag}"
class UpdateKfApps:
    @staticmethod
    def create_runs(config, output_dir, template, src_dir):
        """Create YAML files for the pipeline runs.

        Args:
          config: The path to the configuration
          output_dir: Directory where pipeline runs should be written
          template: The path to the YAML file to act as a template
          src_dir: Directory where source should be checked out

        Returns:
          all_pipelines: Paths of every generated PipelineRun file.
          pipelines_to_run: Subset of all_pipelines whose manifests are out
            of date and therefore need to be executed.
        """
        run_config = yaml_util.load_file(config)
        failures = []
        # List of the YAML files corresponding to pipeline runs that need to
        # be run.
        all_pipelines = []
        pipelines_to_run = []
        # Loop over the cross product of versions and applications and generate
        # a run for each one.
        for version in run_config["versions"]:
            _sync_repos(version["repos"], src_dir)
            for app in run_config["applications"]:
                pair = APP_VERSION_TUPLE(app["name"], version["name"])
                # Load a fresh copy of the template
                run = yaml_util.load_file(template)
                # Make copies of app and version so that we don't end up modifying them
                try:
                    run_file, needs_update = _handle_app(run, _deep_copy(app),
                                                         _deep_copy(version),
                                                         src_dir, output_dir)
                    all_pipelines.append(run_file)
                    if needs_update:
                        pipelines_to_run.append(run_file)
                except (FileNotFoundError, LookupError, ValueError) as e:
                    # Record the failure but keep processing the remaining
                    # app/version pairs.
                    failures.append(pair)
                    extra = {
                        "app": app['name'],
                        "version": version['name'],
                    }
                    logging.error(f"Exception occured creating run for "
                                  f"App: {app['name']} Version: {version['name']} "
                                  f"Exception: {e}\n"
                                  f"{traceback.format_exc()}\n.", extra=extra)
        if failures:
            failed = [f"(App:{i.app}, Version:{i.version})" for i in failures]
            failed = ", ".join(failed)
            logging.error(f"Failed to generate pipeline runs for: {failed}")
        else:
            logging.info("Succcessfully created pipeline runs for all apps and "
                         "versions")
        logging.info(f"Pipelines that need to be run {len(pipelines_to_run)} of "
                     f"{len(all_pipelines)}")
        logging.info("The pipelines that need to be run are:\n%s", "\n".join(
            pipelines_to_run))
        return all_pipelines, pipelines_to_run
@staticmethod
def apply(config, output_dir, template, src_dir, namespace):
"""Create PipelineRuns for any applications that need to be updated.
Args:
config: The path to the configuration; can be local or http file
output_dir: Directory where pipeline runs should be written
template: The path to the YAML file to act as a template
src_dir: Directory where source should be checked out
"""
logging.info("Closing old PRs")
closer = close_old_prs.PRCloser()
closer.apply()
service_account_path = "/var/run/secrets/kubernetes.io"
if os.path.exists("/var/run/secrets/kubernetes.io"):
logging.info(f"{service_account_path} exists; loading in cluster config")
k8s_config.load_incluster_config()
else:
logging.info(f"{service_account_path} doesn't exists; "
"loading kube config file")
k8s_config.load_kube_config(persist_config=False)
client = k8s_client.ApiClient()
crd_api = k8s_client.CustomObjectsApi(client)
_, pipelines_to_run = UpdateKfApps.create_runs(
config, output_dir, template, src_dir)
if not pipelines_to_run: # pylint: disable=too-many-nested-blocks
logging.info("No pipelines need to be run")
else:
logging.info("Submitting pipeline runs to update applications")
for p in pipelines_to_run:
with open(p) as hf:
run = yaml.load(hf)
group, version = run["apiVersion"].split("/", 1)
kind = run["kind"]
plural = kind.lower() + "s"
# Check if there are any pipelines running for the same application
label_filter = {}
for k in ["app", "version", "image_tag"]:
label_filter[k] = run["metadata"]["labels"][k]
items = [f"{k}={v}" for k, v in label_filter.items()]
selector = ",".join(items)
# TODO(https://github.com/tektoncd/pipeline/issues/1302): We should
# probably do some garbage collection of old runs.
current_runs = crd_api.list_namespaced_custom_object(
group, version, namespace, plural, label_selector=selector)
active_run = None
for r in current_runs["items"]:
conditions = r["status"].get("conditions", [])
running = True
for c in conditions[::-1]:
if c.get("type", "").lower() == "succeeded":
if c.get("status", "").lower() in ["true", "false"]:
running = False
break
if running:
active_run = r['metadata']['name']
break
labels = run["metadata"]["labels"]
if active_run:
logging.info(f"Found pipeline run {active_run} "
f"already running for {p}; not rerunning",
extra=labels)
continue
logging.info(f"Creating run from file {p}", extra=labels)
result = crd_api.create_namespaced_custom_object(group, version, namespace, plural,
run)
logging.info(f"Created run "
f"{result['metadata']['namespace']}"
f".{result['metadata']['name']}", extra=labels)
@staticmethod
def sync(config, output_dir, template, src_dir, namespace,
sync_time_seconds=600):
"""Perioridically fire off tekton pipelines to update the manifests.
Args:
config: The path to the configuration
output_dir: Directory where pipeline runs should be written
template: The path to the YAML file to act as a template
src_dir: Directory where source should be checked out
sync_time_seconds: Time in seconds to wait between launches.
"""
while True:
UpdateKfApps.apply(config, output_dir, template, src_dir, namespace)
logging.info("Wait before rerunning")
time.sleep(sync_time_seconds)
class AppVersion:
  """App version is a wrapper around a combination of application and version.
  """

  def __init__(self, app, version):
    """Construct the AppVersion instance.

    Args:
      app: A dictionary representing the app
      version: dictionary representing the version
    """
    self.source_repo = None
    self.manifests_repo = None
    # Scan the version's repos once, remembering the app's source repo and
    # the shared manifests repo (the last matching entry wins, as before).
    for repo in version["repos"]:
      repo_name = repo["name"]
      if repo_name == app["sourceRepo"]:
        self.source_repo = repo
      if repo_name == MANIFESTS_REPO_NAME:
        self.manifests_repo = repo
    if not self.source_repo:
      raise ValueError(f"App {app['name']} uses repo {app['sourceRepo']} "
                       f"but this repo was not defined in version "
                       f"{version['name']}")
    if not self.manifests_repo:
      raise ValueError(f"Repo {MANIFESTS_REPO_NAME} was not defined in version "
                       f"{version['name']}")
    # NOTE(review): branch stores the whole param dict while url extracts
    # ["value"] below — confirm this asymmetry is intended.
    self.branch = _get_param(self.source_repo["resourceSpec"]["params"], "revision")
    url = _get_param(self.source_repo["resourceSpec"]["params"], "url")
    if not url:
      raise ValueError(f"Repository {self.source_repo['name']} is missing "
                       f"param url")
    self.url = url["value"]
    self.repo = _parse_git_url(self.url)
if __name__ == "__main__":
  # Emit logs in json format. This way we can do structured logging
  # and we can query extra fields easily in stackdriver and bigquery.
  json_handler = logging.StreamHandler()
  json_handler.setFormatter(kf_logging.CustomisedJSONFormatter())
  logger = logging.getLogger()
  logger.addHandler(json_handler)
  logger.setLevel(logging.INFO)
  # python-fire turns the UpdateKfApps static methods into CLI subcommands.
  fire.Fire(UpdateKfApps)
| {
"repo_name": "kubeflow/testing",
"path": "py/kubeflow/testing/cd/update_kf_apps.py",
"copies": "1",
"size": "21132",
"license": "apache-2.0",
"hash": -5563661797556832000,
"line_mean": 32.2787401575,
"line_max": 133,
"alpha_frac": 0.6183513155,
"autogenerated": false,
"ratio": 3.706068046299544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48244193617995434,
"avg_score": null,
"num_lines": null
} |
# A python server that a Spark Core can communicate with. Inspired by the ruby and js servers here: https://github.com/spark/local-communication-example
import re
import socket
import sys
def help():
    """Print the command reference for the interactive prompt."""
    # NOTE: the name shadows the builtin help(); kept for compatibility
    # with handleInput, which calls it.
    for line in ("Commands: <pin number><state>",
                 " eg: 7h set pin D7 to high",
                 " eg: 0l set pin D0 to low",
                 " x Exit"):
        print(line)
def handleInput(inp, conn):
    """Dispatch one line of user input: forward a valid pin command to the
    Spark Core, exit on 'x', otherwise show the help text."""
    if inp == 'x':
        conn.close()
        sys.exit()
    # A valid command is one digit 0-7 followed by 'l' (low) or 'h' (high).
    if re.match(r'^[0-7][lh]$', inp):
        conn.sendall(inp.lower().encode("ascii"))
        return
    help()
def main():
    """Listen for one Spark Core connection and forward typed commands."""
    port = 9000
    # Determine this machine's outward-facing IP by "connecting" a throwaway
    # UDP socket to a public address and reading the local socket name.
    ipAddress = [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
    print("Listening on port " + str(port) + " at IP address " + ipAddress)
    print("In another window run curl https://api.spark.io/v1/devices/<DEVICE_ID>/connect -d access_token=<ACCESS_TOKEN> -d ip=" + ipAddress)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((ipAddress, port))
    s.listen(1)
    # Block until the Spark Core connects, then run a simple prompt loop
    # that validates and forwards commands over the socket.
    conn, sparkAddr = s.accept()
    print("Connected from " + sparkAddr[0] + ":" + str(sparkAddr[1]))
    while True:
        print('>> ', end="")
        command = input()
        handleInput(command.rstrip(), conn)

if __name__ == "__main__":
    main()
| {
"repo_name": "KarlParkinson/spark-hacking",
"path": "local-server-connect/simple_server.py",
"copies": "1",
"size": "1331",
"license": "mit",
"hash": -5394630077084656000,
"line_mean": 31.4634146341,
"line_max": 152,
"alpha_frac": 0.5935386927,
"autogenerated": false,
"ratio": 3.2945544554455446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4388093148145545,
"avg_score": null,
"num_lines": null
} |
# a python solution presented by user fibonacci__ (https://www.reddit.com/user/fibonacci__)
import time

# Sample inputs for the "light switches" puzzle: the first line is the
# number of lights, every following line gives the two (inclusive)
# endpoints of one toggled range (endpoints may be in either order).
input1 = '''10
3 6
0 4
7 3
9 9'''
input2 = '''1000
616 293
344 942
27 524
716 291
860 284
74 928
970 594
832 772
343 301
194 882
948 912
533 654
242 792
408 34
162 249
852 693
526 365
869 303
7 992
200 487
961 885
678 828
441 152
394 453'''
# The large bonus input is read from disk alongside this script.
with open('lots_of_switches.txt') as file:
    input3 = ''.join(file.readlines())
def switched(input):
    """Return how many lights are on after toggling every listed range.

    Each line after the first holds two (possibly unordered) endpoints of
    an inclusive range; toggling a range is modelled as a symmetric
    difference against the set of lit lights.

    Fixes: the original did ``i = map(int, i.split())`` and then called
    both ``min(i)`` and ``max(i)`` — under Python 3 ``map`` is a one-shot
    iterator, so ``min`` exhausted it and ``max`` raised ValueError. It
    also used the Python-2-only ``xrange``. Both are replaced with
    constructs that behave identically on Python 2 and 3.
    """
    lights = set()
    for line in input.split('\n')[1:]:
        lo, hi = sorted(int(tok) for tok in line.split())
        lights.symmetric_difference_update(range(lo, hi + 1))
    return len(lights)
def switched_bonus(input):
    """Fast variant: pair up sorted range edges instead of toggling sets.

    Sorting every start/end event and differencing consecutive pairs
    counts exactly the positions toggled an odd number of times. Prints
    the elapsed time as a side effect, like the original.

    Fixes: ``time.clock`` was removed in Python 3.8 (use ``perf_counter``
    when available) and the Python-2 ``print`` statements are
    parenthesised so the function runs on both Python 2 and 3.
    """
    timer = getattr(time, "perf_counter", None) or getattr(time, "clock", None)
    start = timer()
    pairs = [sorted(map(int, row.split())) for row in input.strip().split('\n')[1:]]
    # Tag each endpoint: 0 = range start, 1 = range end (the +1 for the
    # half-open conversion is carried in the tag and added during pairing).
    events = sorted(j for i in pairs for j in [(i[0], 0), (i[1], 1)])
    total = 0
    for lo, hi in zip(events[::2], events[1::2]):
        total += hi[0] - lo[0] + hi[1] - lo[1]
    end = timer()
    print(end - start)
    return total
# Parenthesised so these run under both Python 2 and Python 3 (the bare
# print statements were Python-2-only syntax).
print(switched_bonus(input1))
print(switched_bonus(input2))
print(switched_bonus(input3))
"""
someone commented asking for explanation:
Can you explain the thought process behind the bonus speed optimizations please? I'm a bit confused on why you sorted the intervals, then subtracted the evens+1 from the odds.
fibonacci__ replied:
The idea is to find the difference between pairs of intervals of start and end indices. The intuition is that pairs of ranges will cancel out except at the ends where only one of the ranges will toggle the lights.
For example, [0,4] and [2,4] will toggle only [0,1] since the toggling of [2,4] will cancel out. Therefore, the number of lights toggled is (1 + 1) - 0 = 2. Similarly, [0,4] and [1,3] will only toggle [0,0] and [4,4]. The number of lights toggled is 1 - 0 + (4 + 1) - (3 + 1) = 2. The reason for adding 1 to the end of each range is to convert from a closed interval to a half-open interval for correct counting of lights, so [1,3] contains (3 + 1) - 1 = 3 lights instead of 3 - 1 = 2 lights which is incorrect.
Sorting the start and end indices is essential as it allows pairs to be processed at the non-overlapping ends of the ranges and to be processed in order from left to right so each range edge pair is accounted for at most once.
permalink | parent
reply:
That makes a lot of sense. In my head it's similar to a "NOT AND" statement in binary logic. I think you could remove your last zip and speed things up very slightly. Thank you!
light_input = [sorted([i for i in map(int, row.split())]) for row in txt.strip().split('\n')[1:]]
my_input = sorted([j for i in light_input for j in [i[0], i[1]+1]])
print(sum(my_input[1::2]) - sum(my_input[::2]))
"""
| {
"repo_name": "jamtot/DailyChallenge",
"path": "light_switches (22feb2016)/online_sample.py",
"copies": "1",
"size": "2784",
"license": "mit",
"hash": -3563167289593519600,
"line_mean": 31.3720930233,
"line_max": 511,
"alpha_frac": 0.6778017241,
"autogenerated": false,
"ratio": 3.019522776572668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41973245006726684,
"avg_score": null,
"num_lines": null
} |
"""A Python specification is an abstract requirement definition of a interpreter"""
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
from collections import OrderedDict
from virtualenv.info import fs_is_case_sensitive
from virtualenv.util.six import ensure_str
# Matches loose interpreter specs such as "python3.8-64": an optional
# implementation name, an optional dotted version, an optional -32/-64
# architecture suffix.
PATTERN = re.compile(r"^(?P<impl>[a-zA-Z]+)?(?P<version>[0-9.]+)?(?:-(?P<arch>32|64))?$")
IS_WIN = sys.platform == "win32"
class PythonSpec(object):
    """Contains specification about a Python Interpreter"""

    def __init__(self, str_spec, implementation, major, minor, micro, architecture, path):
        self.str_spec = str_spec  # the raw text this spec was parsed from
        self.implementation = implementation  # e.g. "CPython"; None if unspecified
        self.major = major
        self.minor = minor
        self.micro = micro
        self.architecture = architecture  # 32 or 64 (per PATTERN); None if unspecified
        self.path = path  # set when the spec is (treated as) a filesystem path

    @classmethod
    def from_string_spec(cls, string_spec):
        """Parse a textual spec (implementation/version/arch, or a path).

        Anything that is an absolute path, or that PATTERN cannot parse,
        is treated as a path spec.
        """
        impl, major, minor, micro, arch, path = None, None, None, None, None, None
        if os.path.isabs(string_spec):
            path = string_spec
        else:
            ok = False
            match = re.match(PATTERN, string_spec)
            if match:

                def _int_or_none(val):
                    return None if val is None else int(val)

                try:
                    groups = match.groupdict()
                    version = groups["version"]
                    if version is not None:
                        versions = tuple(int(i) for i in version.split(".") if i)
                        if len(versions) > 3:
                            raise ValueError
                        if len(versions) == 3:
                            major, minor, micro = versions
                        elif len(versions) == 2:
                            major, minor = versions
                        elif len(versions) == 1:
                            # a bare number like "38" means major 3, minor 8
                            version_data = versions[0]
                            major = int(str(version_data)[0])  # first digit major
                            if version_data > 9:
                                minor = int(str(version_data)[1:])
                    ok = True
                except ValueError:
                    pass
                else:
                    impl = groups["impl"]
                    if impl == "py" or impl == "python":
                        impl = "CPython"
                    arch = _int_or_none(groups["arch"])
            if not ok:
                # unparsable specs fall back to being a (relative) path
                path = string_spec
        return cls(string_spec, impl, major, minor, micro, arch, path)

    def generate_names(self):
        """Yield (name, must-match-implementation) candidate executable
        names, most specific version first."""
        impls = OrderedDict()
        if self.implementation:
            # first consider implementation as it is
            impls[self.implementation] = False
            if fs_is_case_sensitive():
                # for case sensitive file systems consider lower and upper case versions too
                # trivia: MacBooks and all pre 2018 Windows-es were case insensitive by default
                impls[self.implementation.lower()] = False
                impls[self.implementation.upper()] = False
        impls["python"] = True  # finally consider python as alias, implementation must match now
        version = self.major, self.minor, self.micro
        try:
            # truncate at the first unspecified version component
            version = version[: version.index(None)]
        except ValueError:
            pass
        for impl, match in impls.items():
            for at in range(len(version), -1, -1):
                cur_ver = version[0:at]
                spec = "{}{}".format(impl, ".".join(str(i) for i in cur_ver))
                yield spec, match

    @property
    def is_abs(self):
        # True only when the spec is a path spec AND that path is absolute
        return self.path is not None and os.path.isabs(self.path)

    def satisfies(self, spec):
        """called when there's a candidate metadata spec to see if compatible - e.g. PEP-514 on Windows"""
        if spec.is_abs and self.is_abs and self.path != spec.path:
            return False
        if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():
            return False
        if spec.architecture is not None and spec.architecture != self.architecture:
            return False

        # unspecified (None) version components on either side match anything
        for our, req in zip((self.major, self.minor, self.micro), (spec.major, spec.minor, spec.micro)):
            if req is not None and our is not None and our != req:
                return False
        return True

    def __unicode__(self):
        """Debug representation listing only the fields that are set."""
        return "{}({})".format(
            type(self).__name__,
            ", ".join(
                "{}={}".format(k, getattr(self, k))
                for k in ("implementation", "major", "minor", "micro", "architecture", "path")
                if getattr(self, k) is not None
            ),
        )

    def __repr__(self):
        # ensure_str keeps this correct on both Python 2 and Python 3
        return ensure_str(self.__unicode__())
| {
"repo_name": "pypa/virtualenv",
"path": "src/virtualenv/discovery/py_spec.py",
"copies": "5",
"size": "4790",
"license": "mit",
"hash": 6661146591130254000,
"line_mean": 38.262295082,
"line_max": 106,
"alpha_frac": 0.5296450939,
"autogenerated": false,
"ratio": 4.544592030360532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7574237124260531,
"avg_score": null,
"num_lines": null
} |
"""A Python translation of the SAX2 parser API. This file provides only
default classes with absolutely minimum functionality, from which
drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
"""
from xml.sax import saxlib
class LexicalHandler:
    """
    Default handler for lexical events

    Note: All methods can raise SAXException. All methods here are no-op
    placeholders for subclasses to override.
    """
    # URI under which parsers register/locate this handler.
    handlerId = 'http://xml.org/sax/handlers/lexical'

    def xmlDecl(self, version, encoding, standalone):
        """The XML Declaration"""
        pass

    def startDTD(self, doctype, publicID, systemID):
        """Invoked at the beginning of the DOCTYPE declaration"""
        pass

    def endDTD(self):
        """
        Invoked after all components of the DOCTYPE declaration,
        including both internal and external DTD subsets
        """
        pass

    def startEntity(self, name):
        """
        Invoked at the start of entity expansion.

        Note: If an external DTD subset is read, it will invoke this method
        with special entity name of "[DTD]"
        """
        pass

    def endEntity(self, name):
        """Invoked at the end of entity expansion."""
        pass

    def comment(self, text):
        """XML Comment"""
        pass

    def startCDATA(self):
        """Beginning of CDATA Section"""
        pass

    def endCDATA(self):
        """End of CDATA Section"""
        pass
class AttributeList2(saxlib.AttributeList):
    """AttributeList extended with SAX2-style attribute metadata queries."""

    def isSpecified(self, id):
        """
        Whether the attribute value with the given name or index was
        explicitly specified in the element, or was determined from the
        default. Parameter can be either integer index or attribute name.
        None (the default) signals 'Don't Know', else a boolean return
        """
        pass

    def getEntityRefList(self, id):
        """
        XML 1.0 parsers are required to report all entity references,
        even if unexpanded. This includes those in attribute strings.
        Many parsers and apps ignore this, but for full conformance,
        this method can be called to get a list of indexes referring
        to entity references within the attribute value string for the
        given name or index. Parameter can be either integer index or
        attribute name.
        """
        pass
class EntityRefList:
    """
    This is the entity-reference list returned by
    AttributeList2.getEntityRefList(index)

    All methods are interface placeholders for implementations to override.
    """
    def getLength(self):
        "Return the number of Entity Ref pointers"
        pass

    def getEntityName(self, index):
        "Return the name of the entity reference at the given index"
        pass

    def getEntityRefStart(self, index):
        """
        Return the string start position of the entity reference
        at the given index
        """
        pass

    def getEntityRefEnd(self, index):
        """
        Return the string end position of the entity reference
        at the given index
        """
        pass

    def __len__(self):
        "Alias for getLength."
        pass
class DTDDeclHandler:
    """
    A handler for a minimal set of DTD Events
    """
    # Content-model kinds reported via elementDecl's modelType argument.
    MODEL_ELEMENTS = 1
    MODEL_MIXED = 2
    MODEL_ANY = 3
    MODEL_EMPTY = 4
    # Attribute default kinds reported via attributeDecl's defaultType.
    ATTRIBUTE_DEFAULTED = 1
    ATTRIBUTE_IMPLIED = 2
    ATTRIBUTE_REQUIRED = 3
    ATTRIBUTE_FIXED = 4
    handlerId = 'http://xml.org/sax/handlers/dtd-decl'

    def elementDecl(self, name, modelType, model):
        """
        Report an element-type declaration.
        name and model are strings, modelType is an enumerated int from 1 to 4
        """
        pass

    def attributeDecl(self,
                      element,
                      name,
                      type,
                      defaultValue,
                      defaultType,
                      entityRefs):
        """
        Report an attribute declaration. The first 4 parameters are strings,
        defaultType is an integer from 1 to 4, entityRefs is an EntityRefList
        """
        pass

    def externalEntityDecl(self, name, isParameterEntity, publicId, systemId):
        """
        Report an external entity declaration.
        All parameters are strings except for isParameterEntity,
        which is 0 or 1
        """
        pass

    def internalEntityDecl(self, name, isParameterEntity, value):
        """
        Report an internal entity declaration.
        All parameters are strings except for isParameterEntity,
        which is 0 or 1
        """
        pass
class NamespaceHandler:
    """
    Receive callbacks for the start and end of the scope of each
    namespace declaration.
    """
    # URI under which parsers register/locate this handler.
    handlerId = 'http://xml.org/sax/handlers/namespace'

    def startNamespaceDeclScope(self, prefix, uri):
        """
        Report the start of the scope of a namespace declaration.
        This event will be reported before the startElement event
        for the element containing the namespace declaration. All
        declarations must be properly nested; if there are multiple
        declarations in a single element, they must end in the opposite
        order that they began.

        both parameters are strings
        """
        pass

    def endNamespaceDeclScope(self, prefix):
        """
        Report the end of the scope of a namespace declaration.
        This event will be reported after the endElement event for
        the element containing the namespace declaration. Namespace
        scopes must be properly nested.
        """
        pass
class ModParser(saxlib.Parser):
    """
    Parser interface extended with SAX2 feature/handler/property setters.

    All methods may raise
    SAXNotSupportedException
    """
    def setFeature(self, featureID, state):
        """
        featureId is a string, state a boolean
        """
        pass

    def setHandler(self, handlerID, handler):
        """
        handlerID is a string, handler a handler instance
        """
        pass

    def set(self, propID, value):
        """
        propID is a string, value of arbitrary type
        """
        pass

    def get(self, propID):
        """Return the current value of the property named by propID."""
        pass
import sys
if sys.platform[0:4] == 'java':
from exceptions import Exception
class SAXNotSupportedException(Exception):
    """
    Indicate that a SAX2 parser interface does not support a particular
    feature or handler, or property.
    """
    pass
# Just a few helper lists with the identifiers of the core SAX2 handlers,
# properties, and features defined above / by the SAX2 specification.
CoreHandlers = [
    'http://xml.org/sax/handlers/lexical',
    'http://xml.org/sax/handlers/dtd-decl',
    'http://xml.org/sax/handlers/namespace'
]

CoreProperties = [
    'http://xml.org/sax/properties/namespace-sep',
    #write-only string
    #Set the separator to be used between the URI part of a name and the
    #local part of a name when namespace processing is being performed
    #(see the http://xml.org/sax/features/namespaces feature). By
    #default, the separator is a single space. This property may not be
    #set while a parse is in progress (raises SAXNotSupportedException).
    'http://xml.org/sax/properties/dom-node',
    #read-only Node instance
    #Get the DOM node currently being visited, if the SAX parser is
    #iterating over a DOM tree. If the parser recognises and supports
    #this property but is not currently visiting a DOM node, it should
    #return null (this is a good way to check for availability before the
    #parse begins).
    'http://xml.org/sax/properties/xml-string'
    #read-only string
    #Get the literal string of characters associated with the current
    #event. If the parser recognises and supports this property but is
    #not currently parsing text, it should return null (this is a good
    #way to check for availability before the parse begins).
]

CoreFeatures = [
    'http://xml.org/sax/features/validation',
    #Validate (1) or don't validate (0).
    'http://xml.org/sax/features/external-general-entities',
    #Expand external general entities (1) or don't expand (0).
    'http://xml.org/sax/features/external-parameter-entities',
    #Expand external parameter entities (1) or don't expand (0).
    'http://xml.org/sax/features/namespaces',
    #Preprocess namespaces (1) or don't preprocess (0). See also
    #the http://xml.org/sax/properties/namespace-sep property.
    'http://xml.org/sax/features/normalize-text'
    #Ensure that all consecutive text is returned in a single callback to
    #DocumentHandler.characters or DocumentHandler.ignorableWhitespace
    #(1) or explicitly do not require it (0).
]
| {
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"path": "Lib/xml/dom/ext/reader/Sax2Lib.py",
"copies": "13",
"size": "8295",
"license": "mit",
"hash": 4172427997175109000,
"line_mean": 28.414893617,
"line_max": 78,
"alpha_frac": 0.6526823388,
"autogenerated": false,
"ratio": 4.393538135593221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A python utils library
* https://github.com/theno/utlz
* https://pypi.python.org/pypi/utlz
"""
import os
import shutil
from setuptools import setup, find_packages
from codecs import open
def create_readme_with_long_description():
    """Return the text to use as setup()'s long_description.

    Copies README.md to README and tries to convert it to
    reStructuredText via pypandoc; when pandoc/pypandoc is unavailable
    (or conversion fails) it falls back to symlinking README -> README.md
    and returning the markdown text verbatim.
    """
    this_dir = os.path.abspath(os.path.dirname(__file__))
    readme_md = os.path.join(this_dir, 'README.md')
    readme = os.path.join(this_dir, 'README')
    if os.path.isfile(readme_md):
        # Replace a stale README symlink with a real copy of README.md so
        # pypandoc (and source distributions) see a regular file.
        if os.path.islink(readme):
            os.remove(readme)
        shutil.copy(readme_md, readme)
    try:
        import pypandoc
        long_description = pypandoc.convert(readme_md, 'rst', format='md')
        if os.path.islink(readme):
            os.remove(readme)
        with open(readme, 'w') as out:
            out.write(long_description)
    except(IOError, ImportError, RuntimeError):
        # Fallback: expose the markdown source as README via a symlink.
        if os.path.isfile(readme_md):
            os.remove(readme)
        os.symlink(readme_md, readme)
    with open(readme, encoding='utf-8') as in_:
        long_description = in_.read()
    return long_description
description = 'A python utils library'
long_description = create_readme_with_long_description()

# Read the version string out of utlz/_version.py (the text between the
# first pair of double quotes) without importing the package.
this_dir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(this_dir, 'utlz', '_version.py')
with open(filename, 'rt') as fh:
    version = fh.read().split('"')[1]
# Package metadata handed to setuptools; version/description/long_description
# are computed above so each is defined in exactly one place.
setup(
    name='utlz',
    version=version,
    description=description,
    long_description=long_description,
    url='https://github.com/theno/utlz',
    author='Theodor Nolte',
    author_email='utlz@theno.eu',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='python development utilities library',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
)
| {
"repo_name": "theno/utlz",
"path": "setup.py",
"copies": "1",
"size": "2322",
"license": "mit",
"hash": 2730287172163115500,
"line_mean": 32.1714285714,
"line_max": 74,
"alpha_frac": 0.6231696813,
"autogenerated": false,
"ratio": 3.7211538461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4844323527453846,
"avg_score": null,
"num_lines": null
} |
# A Python version of the redis-caching server.
#
# Issues a thread per connection, each thread blocks on every operation.
#
# Tested with Python 3.6 (requires the 'redis' package to be installed).
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import math
import redis
import socket
import sys
from concurrent.futures import ThreadPoolExecutor
rclient = redis.StrictRedis(host='localhost')
def is_prime(num):
    """Return True if num is prime, False otherwise (including num < 2).

    Trial division by odd candidates up to the integer square root.
    Uses math.isqrt instead of int(math.sqrt(...)) so the bound is exact
    even for very large integers, where float sqrt can round incorrectly.
    """
    if num == 2:
        return True
    if num < 2 or num % 2 == 0:
        return False
    # num is odd and >= 3: any factor must be an odd number <= isqrt(num).
    return all(num % d for d in range(3, math.isqrt(num) + 1, 2))
def handle_client_data(buf, sockobj):
    """A new buffer of data was received from a client - handle it."""
    cachekey = b'primecache:' + buf
    answer = rclient.get(cachekey)
    if answer is None:
        # Cache miss: classify the number and remember the verdict in Redis.
        answer = b'prime' if is_prime(int(buf)) else b'composite'
        rclient.set(cachekey, answer)
    sockobj.send(answer + b'\n')
def serve_connection(sockobj, client_address):
    """Serve one client connection until it closes or errors.

    Reads buffers in a blocking loop and hands each one to
    handle_client_data. Fixes: removed a leftover debug statement
    (print('boba')) that polluted the log on every received buffer, and
    dropped the unused exception binding in the IOError handler.
    """
    print('peer {0} connected'.format(client_address))
    while True:
        try:
            buf = sockobj.recv(1024)
            if not buf:
                # Empty read means the peer closed the connection.
                break
            handle_client_data(buf, sockobj)
        except IOError:
            # Treat socket errors as a normal disconnect.
            break
        except Exception as e:
            print('unknown exception', e)
            raise
    print('connection from {0} closed'.format(client_address))
    sys.stdout.flush()
    sockobj.close()
if __name__ == '__main__':
    # Optional argv[1] overrides the default listening port.
    portnum = 8070 if len(sys.argv) < 2 else int(sys.argv[1])
    pool = ThreadPoolExecutor()
    sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sockobj.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sockobj.bind(('localhost', portnum))
    sockobj.listen(15)
    try:
        # One pooled thread per accepted connection; each thread blocks on
        # its own socket inside serve_connection.
        while True:
            client_socket, client_address = sockobj.accept()
            pool.submit(serve_connection, client_socket, client_address)
    except KeyboardInterrupt as e:
        print(e)
        sockobj.close()
| {
"repo_name": "eliben/code-for-blog",
"path": "2018/async-socket-server/primeserver-py-blocking.py",
"copies": "1",
"size": "2195",
"license": "unlicense",
"hash": -4649461307075161000,
"line_mean": 26.0987654321,
"line_max": 72,
"alpha_frac": 0.6150341686,
"autogenerated": false,
"ratio": 3.70777027027027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.982280443887027,
"avg_score": 0,
"num_lines": 81
} |
"""A Python wrapper for the DigitalOcean API."""
import sys
python_major_version = sys.version_info[0]
import re
import json
import logging
from itertools import chain, cycle, islice
import requests
### Redact client_id and api_key from logged URLs.
class ClientAndKeyRedactor(logging.Formatter):
    """Log formatter that masks client_id and api_key values in messages."""

    def format(self, record):
        rendered = super(ClientAndKeyRedactor, self).format(record)
        # Replace everything after "client_id=" / "api_key=" up to the next
        # whitespace or '&' with a placeholder so credentials never reach
        # the logs.
        return re.sub(r'(client_id|api_key)=[^\s\&]*',
                      r'\1=[REDACTED]',
                      rendered)
# Install the redacting formatter on the 'requests' logger (with
# propagation disabled) so URLs embedding credentials are never written
# to the logs verbatim.
handler = logging.StreamHandler()
handler.setFormatter(
    ClientAndKeyRedactor('%(asctime)s %(levelname)s %(message)s'))
logger = logging.getLogger('requests')
logger.addHandler(handler)
logger.propagate = False
###
class DigitalOceanAPI(object):
"""This is a general wrapper around the DigitalOcean API.
See README.md for example usage.
TODO:
- Improve error reporting: Misuse of the request() command, such as by
passing the wrong number of ids for a given api endpoint, results in
an APIException (good!) that can be hard to decipher (bad!).
"""
class APIException(Exception):
"""The DigitalOcean API returned an error."""
def __init__(self, resource_path, api_msg):
self.resource_path = resource_path
self.api_msg = api_msg
def __str__ (self):
msg = 'While getting https://{}{}, DigitalOcean reported an ' + \
'error:\n\t"{}"'
return msg.format(
DigitalOceanAPI.api_host, self.resource_path, self.api_msg)
api_host = "api.digitalocean.com"
def __init__(
self,
client_id,
api_key,
check_cert = True,
pemfile = None,
capath = None,
maximum_retries=2,
debug=False
):
"""Create a DigitalOceanAPI instance.
Keyword Arguments:
check_cert - whether the HTTPS connection's cert should be verified
pemfile - path to PEM file, such as "/etc/ssl/certs/ca-certificates.pem"
capath - path to list of CA certs, such as "/etc/ssl/certs/"
Note:
To use the DigitalOceanAPI class, you must provide either a
pemfile or a capath. If it is not obvious which values to provide,
see http://mercurial.selenic.com/wiki/CACertificates for a related
explanation and platform-specific hints.
"""
assert client_id
assert api_key
assert pemfile or capath if check_cert else True
assert maximum_retries >= 0
self.connection = None
self.client_id = client_id
self.api_key = api_key
self.pemfile = pemfile
self.capath = capath
self.check_cert = check_cert
self.maximum_retries = maximum_retries
self._retries_count = maximum_retries
self._debug = debug
def __enter__(self):
return self
def __exit__(self):
pass
def request(self, api_endpoint, params={}, ids=[]):
"""Perform an API request and returns the parsed JSON data.
Throws an exception with API description if the response is an error.
Arguments:
api_endpoint -
API endpoints are documented at
https://www.digitalocean.com/api
IMPORTANT NOTE:
When DigitalOcean specifies endpoints that require ids,
their documenation shows the ids inline as they appear in
the HTTP GET request. For example, '/images/[image_id]' or
'/domains/[domain_id]/records/new'. s
To keep things simple for you, the api_endpoint parameter
omits the placeholders for the ids, the ids themselves,
and the leading front slash.
So, for example:
when DigitalOcean says:
/domains
set api_endpoint to:
'domains'
when DigitalOcean says:
/images/[image_id]/destroy
set api_endpoint to:
'images/destroy'
when DigitalOcean says:
/domains/[domain_id]/records/[record_id]/destroy
set api_endpoint to:
'domains/records/destroy'
params - parameters of the request EXCLUDING client_id and api_key
e.g., {'name': 'domain_name', 'ip_address': '127.0.0.1'}
ids - array of ids for endpoints that require an id or two
Example endpoints that require ids:
/droplets/[droplet_id]/reboot requires one id
/domains/[domain_id]/records/[record_id] requires two ids
The array must contain the ids required by the endpoint in the
order they appear in the endpoint.
"""
resource_path = self._make_resource_path(api_endpoint, params, ids)
try:
response = requests.get("https://{}{}".format(
DigitalOceanAPI.api_host, resource_path))
except requests.exceptions.RequestException as e:
return self._retry_or_raise(api_endpoint, params, ids, e)
if response.status_code != 200:
raise DigitalOceanAPI.APIException(
resource_path, 'status code: {}'.format(response.status_code))
self._retries_count = 0
try:
response_json = response.json()
except ValueError:
raise DigitalOceanAPI.APIException(
self._make_resource_path(
api_endpoint, params, ids, redact_credentials=True),
response_data)
if response_json["status"] != "OK":
raise DigitalOceanAPI.APIException(
self._make_resource_path(
api_endpoint, params, ids, redact_credentials=True),
response_json["message"])
return response_json
def _make_resource_path(
    self, api_endpoint, params=None, ids=None, redact_credentials=False
):
    """Build the path plus query string for an API request.

    Interleaves ``ids`` into the segments of ``api_endpoint`` (one id
    after each path segment), then appends all request parameters and
    the client credentials as the query string.

    Arguments:
        api_endpoint -- endpoint string with id placeholders omitted,
            e.g. 'domains/records/destroy'
        params -- dict of request parameters (defaults to none)
        ids -- list of ids to interleave into the endpoint (defaults
            to none)
        redact_credentials -- when True, substitute '[REDACTED]' for
            client_id/api_key so the result is safe to log or embed in
            error messages
    """
    # Fix: use None sentinels instead of mutable default arguments
    # ({} / []), which are created once and shared across all calls.
    if params is None:
        params = {}
    if ids is None:
        ids = []
    path = '/'
    try:
        path += '/'.join(self._roundrobin(api_endpoint.split('/'), ids))
    except KeyError:
        path += api_endpoint
    credentials = {
        'client_id': self.client_id,
        'api_key': self.api_key
    } if not redact_credentials else {
        'client_id': '[REDACTED]',
        'api_key': '[REDACTED]'
    }
    all_params = dict(chain(params.items(), credentials.items()))
    path_params = '&'.join(
        ['{}={}'.format(k, v) for (k, v) in all_params.items()])
    # NOTE(review): path always begins with '/', so the conditional
    # segment below always evaluates to '' — kept for byte-identical
    # output; TODO confirm the original intent (trailing slash?).
    return '{}{}?{}'.format(
        path,
        '/' if '/' not in path else '',
        path_params)
def _roundrobin(self, *iterables):
"""roundrobin('ABC', 'D', 'EF') --> A D E B F C"""
# Based upon a recipe credited to George Sakkis.
# Source: http://docs.python.org/3.3/library/itertools.html
pending = len(iterables)
if python_major_version == 3:
nexts = cycle(iter(it).__next__ for it in iterables)
else:
nexts = cycle(iter(it).next for it in iterables)
while pending:
try:
for next in nexts:
yield str(next())
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def _retry_or_raise(self, api_endpoint, params, ids, exception):
    """Retry a failed request, or re-raise once retries are exhausted.

    Called when the HTTP GET raised a transport-level RequestException.
    While self._retries_count is below self.maximum_retries the request
    is re-issued; otherwise an exception of the same class is raised
    with a message containing the credential-redacted request URL.
    """
    if self._retries_count < self.maximum_retries:
        self._retries_count += 1
        # Re-issue the identical request; request() resets the retry
        # counter back to zero after a successful status code.
        return self.request(api_endpoint, params, ids)
    else:
        # Raise the same exception class so callers' except clauses for
        # the original exception type still match.
        raise exception.__class__(' -- '.join(
            [
                'https://{}{}'.format(
                    DigitalOceanAPI.api_host,
                    self._make_resource_path(
                        api_endpoint,
                        params,
                        ids,
                        redact_credentials=True)),
                str(exception)
            ]
)) | {
"repo_name": "kkurian/digitalocean",
"path": "digitalocean.py",
"copies": "1",
"size": "8283",
"license": "mit",
"hash": -389862112344218200,
"line_mean": 31.873015873,
"line_max": 84,
"alpha_frac": 0.5417119401,
"autogenerated": false,
"ratio": 4.4700485698866705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5511760509986671,
"avg_score": null,
"num_lines": null
} |
"""A PyZMQ based RPC service.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012. Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import logging
import sys
import zmq
from zmq.eventloop.zmqstream import ZMQStream
from zmq.eventloop.ioloop import IOLoop
from zmq.utils import jsonapi
from .serializer import PickleSerializer
from .py3compat import format_exc
#-----------------------------------------------------------------------------
# RPC Service
#-----------------------------------------------------------------------------
class RPCBase(object):
    """Shared socket/stream plumbing for the RPC service and proxy."""

    def __init__(self, loop=None, context=None, serializer=None):
        """Base class for RPC service and proxy.

        Parameters
        ==========
        loop : IOLoop
            An existing IOLoop instance; defaults to IOLoop.instance().
        context : Context
            An existing Context instance; defaults to
            zmq.Context.instance().
        serializer : Serializer
            Serializer subclass instance used for args, kwargs and the
            result; defaults to PickleSerializer.
        """
        self.loop = IOLoop.instance() if loop is None else loop
        self.context = zmq.Context.instance() if context is None else context
        self.socket = None
        self.stream = None
        if serializer is None:
            serializer = PickleSerializer()
        self._serializer = serializer
        self.reset()

    #-------------------------------------------------------------------------
    # Public API
    #-------------------------------------------------------------------------

    def reset(self):
        """Close any existing socket and create a fresh one."""
        if isinstance(self.socket, zmq.Socket):
            self.socket.close()
        self._create_socket()
        self.urls = []

    def bind(self, url):
        """Bind the service to a url of the form proto://ip:port."""
        self.socket.bind(url)
        self.urls.append(url)

    def bind_ports(self, ip, ports):
        """Bind the socket to the first available tcp port.

        ``ports`` may be a single integer or a list of candidates:

        * ports == 0 binds to a random port.
        * ports > 0 binds to that specific port.
        * a list binds to the first free port in the list.

        The chosen url is recorded in self.urls. Raises
        zmq.ZMQBindError if no free port can be found.
        """
        candidates = [ports] if isinstance(ports, int) else ports
        port = None
        for candidate in candidates:
            try:
                if candidate == 0:
                    # bind_to_random_port raises ZMQBindError when it
                    # cannot find a free port.
                    port = self.socket.bind_to_random_port("tcp://%s" % ip)
                else:
                    # bind raises ZMQError if the port is not free.
                    self.socket.bind("tcp://%s:%i" % (ip, candidate))
                    port = candidate
            except (zmq.ZMQError, zmq.ZMQBindError):
                continue
            else:
                break
        if port is None:
            raise zmq.ZMQBindError('Could not find an available port')
        self.urls.append('tcp://%s:%i' % (ip, port))
        return port

    def connect(self, url):
        """Connect the service to a url of the form proto://ip:port."""
        self.socket.connect(url)
        self.urls.append(url)
class RPCService(RPCBase):
    """An RPC service that takes requests over a ROUTER socket."""

    def _create_socket(self):
        # A ROUTER socket prefixes every incoming message with the
        # caller's routing identities; _handle_request strips them and
        # _build_reply restores them so replies reach the right client.
        self.socket = self.context.socket(zmq.ROUTER)
        self.stream = ZMQStream(self.socket, self.loop)
        self.stream.on_recv(self._handle_request)

    def _build_reply(self, status, data):
        """Build a reply message for status and data.

        Relies on self.idents and self.msg_id captured from the request
        currently being handled (set by _handle_request).

        Parameters
        ----------
        status : bytes
            Either b'SUCCESS' or b'FAILURE'.
        data : list of bytes
            A list of data frames to be appended to the message.
        """
        reply = []
        reply.extend(self.idents)
        reply.extend([b'|', self.msg_id, status])
        reply.extend(data)
        return reply

    def _handle_request(self, msg_list):
        """Handle an incoming request.

        The request is received as a multipart message:

        [<idents>, b'|', msg_id, method, <sequence of serialized args/kwargs>]

        The reply depends on if the call was successful or not:

        [<idents>, b'|', msg_id, 'SUCCESS', <sequence of serialized result>]
        [<idents>, b'|', msg_id, 'FAILURE', <JSON dict of ename, evalue, traceback>]

        Here the (ename, evalue, traceback) are utf-8 encoded unicode.
        """
        # The b'|' delimiter frame separates routing identities from
        # the payload frames that follow it.
        i = msg_list.index(b'|')
        self.idents = msg_list[0:i]
        self.msg_id = msg_list[i+1]
        method = msg_list[i+2].decode('utf-8')
        data = msg_list[i+3:]
        args, kwargs = self._serializer.deserialize_args_kwargs(data)
        # Find and call the actual handler for message.
        handler = getattr(self, method, None)
        # Only methods explicitly marked with @rpc_method are callable,
        # so arbitrary attribute names cannot be invoked remotely.
        if handler is not None and getattr(handler, 'is_rpc_method', False):
            try:
                result = handler(*args, **kwargs)
            except Exception:
                self._send_error()
            else:
                try:
                    data_list = self._serializer.serialize_result(result)
                except Exception:
                    # Result could not be serialized: report as failure.
                    self._send_error()
                else:
                    reply = self._build_reply(b'SUCCESS', data_list)
                    self.stream.send_multipart(reply)
        else:
            logging.error('Unknown RPC method: %s' % method)
        # Clear per-request state so stale identities are never reused.
        self.idents = None
        self.msg_id = None

    def _send_error(self):
        """Send an error reply."""
        # Must run inside an except block so sys.exc_info() still
        # refers to the active exception.
        etype, evalue, tb = sys.exc_info()
        error_dict = {
            'ename' : str(etype.__name__),
            'evalue' : str(evalue),
            'traceback' : format_exc(tb)
        }
        data_list = [jsonapi.dumps(error_dict)]
        reply = self._build_reply(b'FAILURE', data_list)
        self.stream.send_multipart(reply)

    def start(self):
        """Start the event loop for this RPC service."""
        self.loop.start()
def rpc_method(f):
    """A decorator for use in declaring a method as an rpc method.

    Use as follows::

        @rpc_method
        def echo(self, s):
            return s
    """
    # Tag the function in place; RPCService._handle_request checks for
    # this attribute before dispatching.
    setattr(f, 'is_rpc_method', True)
    return f
| {
"repo_name": "ellisonbg/zpyrpc",
"path": "zpyrpc/service.py",
"copies": "1",
"size": "6985",
"license": "bsd-3-clause",
"hash": 2729526632178252300,
"line_mean": 32.2619047619,
"line_max": 87,
"alpha_frac": 0.5178239084,
"autogenerated": false,
"ratio": 4.454719387755102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5472543296155101,
"avg_score": null,
"num_lines": null
} |
"""Aqara Devices"""
import json
import logging
from pydispatch import dispatcher
from aqara.const import (
AQARA_DEVICE_HT,
AQARA_DEVICE_MOTION,
AQARA_DEVICE_MAGNET,
AQARA_DEVICE_SWITCH,
AQARA_SWITCH_ACTION_CLICK,
AQARA_SWITCH_ACTION_DOUBLE_CLICK,
AQARA_SWITCH_ACTION_LONG_CLICK_PRESS,
AQARA_SWITCH_ACTION_LONG_CLICK_RELEASE,
AQARA_DATA_VOLTAGE,
AQARA_DATA_STATUS,
AQARA_DATA_TEMPERATURE,
AQARA_DATA_HUMIDITY
)
# pydispatch signal names used to notify subscribed listeners.
HASS_UPDATE_SIGNAL = "update_hass_sensor"
HASS_HEARTBEAT_SIGNAL = "heartbeat_hass_sensor"

_LOGGER = logging.getLogger(__name__)

# Maps the raw button status strings reported by the gateway to the
# AQARA_SWITCH_ACTION_* constants.
BUTTON_ACTION_MAP = {
    "click": AQARA_SWITCH_ACTION_CLICK,
    "double_click": AQARA_SWITCH_ACTION_DOUBLE_CLICK,
    "long_click_press": AQARA_SWITCH_ACTION_LONG_CLICK_PRESS,
    "long_click_release": AQARA_SWITCH_ACTION_LONG_CLICK_RELEASE
}
def create_device(gateway, model, sid):
    """Device factory: instantiate the sensor class matching *model*."""
    model_to_class = {
        AQARA_DEVICE_HT: AqaraHTSensor,
        AQARA_DEVICE_MOTION: AqaraMotionSensor,
        AQARA_DEVICE_MAGNET: AqaraContactSensor,
        AQARA_DEVICE_SWITCH: AqaraSwitchSensor,
    }
    device_cls = model_to_class.get(model)
    if device_cls is None:
        raise RuntimeError('Unsupported device type: {} [{}]'.format(model, sid))
    return device_cls(gateway, sid)
class AqaraBaseDevice(object):
    """Base class wiring an Aqara sensor to gateway reads and to
    pydispatch update/heartbeat notifications."""

    def __init__(self, model, gateway, sid):
        self._gateway = gateway
        self._model = model
        self._sid = sid
        self._voltage = None  # unknown until the first report/heartbeat

    @property
    def sid(self):
        """property: sid"""
        return self._sid

    @property
    def model(self):
        """property: model"""
        return self._model

    @property
    def voltage(self):
        """property: voltage"""
        return self._voltage

    def subscribe_update(self, handle_update):
        """subscribe to sensor update event"""
        dispatcher.connect(handle_update, signal=HASS_UPDATE_SIGNAL, sender=self)

    def unsubscribe_update(self, handle_update):
        """unsubscribe from sensor update event"""
        dispatcher.disconnect(handle_update, signal=HASS_UPDATE_SIGNAL, sender=self)

    def subscribe_heartbeat(self, handle_heartbeat):
        """subscribe to sensor heartbeat event"""
        dispatcher.connect(handle_heartbeat, signal=HASS_HEARTBEAT_SIGNAL, sender=self)

    def unsubscribe_heartbeat(self, handle_heartbeat):
        """unsubscribe from sensor heartbeat event"""
        dispatcher.disconnect(handle_heartbeat, signal=HASS_HEARTBEAT_SIGNAL, sender=self)

    def update_now(self):
        """force read sensor data"""
        self._gateway.read_device(self._sid)

    def on_update(self, data):
        """handler for sensor data update"""
        self.log_info("on_update: {}".format(json.dumps(data)))
        self._refresh_voltage(data)
        self.do_update(data)
        dispatcher.send(signal=HASS_UPDATE_SIGNAL, sender=self)

    def on_heartbeat(self, data):
        """handler for heartbeat"""
        self.log_info("on_heartbeat: {}".format(json.dumps(data)))
        self._refresh_voltage(data)
        self.do_heartbeat(data)
        dispatcher.send(signal=HASS_HEARTBEAT_SIGNAL, sender=self)

    def _refresh_voltage(self, data):
        """record battery voltage when present (reports and heartbeats)"""
        if AQARA_DATA_VOLTAGE in data:
            self._voltage = data[AQARA_DATA_VOLTAGE]

    def do_update(self, data):
        """update sensor state according to data (subclass hook)"""
        pass

    def do_heartbeat(self, data):
        """update heartbeat (subclass hook)"""
        pass

    def log_warning(self, msg):
        """log warning"""
        self._log(_LOGGER.warning, msg)

    def log_info(self, msg):
        """log info"""
        self._log(_LOGGER.info, msg)

    def log_debug(self, msg):
        """log debug"""
        self._log(_LOGGER.debug, msg)

    def _log(self, log_func, msg):
        """log with the sensor's sid and model prepended"""
        log_func('%s [%s]: %s', self.sid, self.model, msg)
class AqaraHTSensor(AqaraBaseDevice):
    """Temperature / humidity sensor (model AQARA_DEVICE_HT)."""

    def __init__(self, gateway, sid):
        super().__init__(AQARA_DEVICE_HT, gateway, sid)
        self._temperature = 0
        self._humidity = 0

    @property
    def temperature(self):
        """property: temperature (unit: C)"""
        return self._temperature

    @property
    def humidity(self):
        """property: humidity (unit: %)"""
        return self._humidity

    def do_update(self, data):
        # Each reading is optional in a report; keep the previous value
        # when its key is absent.
        for key, attr in ((AQARA_DATA_TEMPERATURE, '_temperature'),
                          (AQARA_DATA_HUMIDITY, '_humidity')):
            if key in data:
                setattr(self, attr, self.parse_value(data[key]))

    def do_heartbeat(self, data):
        # heartbeat for HT sensor contains the same data as a report
        self.do_update(data)

    @staticmethod
    def parse_value(str_value):
        """Convert a raw hundredths string to a float rounded to 0.1."""
        return round(int(str_value) / 100, 1)
class AqaraContactSensor(AqaraBaseDevice):
    """Door/window magnet sensor (model AQARA_DEVICE_MAGNET)."""

    def __init__(self, gateway, sid):
        super().__init__(AQARA_DEVICE_MAGNET, gateway, sid)
        self._triggered = False

    @property
    def triggered(self):
        """property: triggered (bool)"""
        return self._triggered

    def do_update(self, data):
        if AQARA_DATA_STATUS in data:
            self._triggered = (data[AQARA_DATA_STATUS] == "open")

    def do_heartbeat(self, data):
        # heartbeat payload mirrors a normal report
        self.do_update(data)
class AqaraMotionSensor(AqaraBaseDevice):
    """Motion sensor (model AQARA_DEVICE_MOTION)."""

    def __init__(self, gateway, sid):
        super().__init__(AQARA_DEVICE_MOTION, gateway, sid)
        self._triggered = False

    @property
    def triggered(self):
        """property: triggered (bool)"""
        return self._triggered

    def do_update(self, data):
        # A report without a status field clears the triggered state.
        status = data.get(AQARA_DATA_STATUS)
        self._triggered = (status == "motion")
class AqaraSwitchSensor(AqaraBaseDevice):
    """AqaraSwitchSensor (wireless button, model AQARA_DEVICE_SWITCH)."""

    def __init__(self, gateway, sid):
        super().__init__(AQARA_DEVICE_SWITCH, gateway, sid)
        # Last button action observed (one of BUTTON_ACTION_MAP's
        # values), or None before the first event.
        self._action = None

    @property
    def action(self):
        """property: last_action"""
        return self._action

    def do_update(self, data):
        if AQARA_DATA_STATUS in data:
            status = data[AQARA_DATA_STATUS]
            if status in BUTTON_ACTION_MAP:
                self._action = BUTTON_ACTION_MAP[status]
            else:
                # Bug fix: the original applied '%' to a '{}'-style
                # template ('invalid status: {}' % status), which raises
                # TypeError instead of logging the warning.
                self.log_warning('invalid status: {}'.format(status))
| {
"repo_name": "javefang/pyaqara",
"path": "aqara/device.py",
"copies": "1",
"size": "6504",
"license": "mit",
"hash": 1763891152878805000,
"line_mean": 29.2511627907,
"line_max": 90,
"alpha_frac": 0.6206949569,
"autogenerated": false,
"ratio": 3.5463467829880044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4667041739888005,
"avg_score": null,
"num_lines": null
} |
"""aq - Query AWS resources with SQL
Usage:
aq [--profile=<profile>] [--region=<region>] [--table-cache-ttl=<seconds>] [-v] [--debug]
aq [--profile=<profile>] [--region=<region>] [--table-cache-ttl=<seconds>] [-v] [--debug] <query>
Sample queries:
aq "select tags->'Name' from ec2_instances"
aq "select count(*) from us_west_1.ec2_instances"
Options:
--profile=<profile> Use a specific profile from your credential file
--region=<region> The region to use. Overrides config/env settings
--table-cache-ttl=<seconds> number of seconds to cache the tables
before we update them from AWS again [default: 300]
-v, --verbose enable verbose logging
--debug enable debug mode
"""
from __future__ import print_function
import traceback
from collections import namedtuple
from docopt import docopt
from aq.engines import BotoSqliteEngine
from aq.errors import QueryError
from aq.formatters import TableFormatter
from aq.logger import initialize_logger
from aq.parsers import SelectParser
from aq.prompt import AqPrompt
# Package version string.
__version__ = '0.1.1'

# Bundle of everything produced by a single query execution.
QueryResult = namedtuple('QueryResult', ('parsed_query', 'query_metadata', 'columns', 'rows'))


def get_engine(options):
    """Return the query-execution engine (boto-backed SQLite)."""
    return BotoSqliteEngine(options)


def get_parser(options):
    """Return the SQL select-statement parser."""
    return SelectParser(options)


def get_formatter(options):
    """Return the tabular result formatter."""
    return TableFormatter(options)


def get_prompt(parser, engine, options):
    """Return the interactive REPL prompt."""
    return AqPrompt(parser, engine, options)
def main():
    """CLI entry point: run a one-shot query, or start the REPL."""
    args = docopt(__doc__)
    initialize_logger(verbose=args['--verbose'], debug=args['--debug'])

    parser = get_parser(args)
    engine = get_engine(args)
    formatter = get_formatter(args)

    if args['<query>']:
        # Non-interactive mode: execute the single query and exit.
        query = args['<query>']
        res = execute_query(engine, formatter, parser, query)
        print(formatter.format(res.columns, res.rows))
    else:
        repl = get_prompt(parser, engine, args)
        while True:
            try:
                query = repl.prompt()
                res = execute_query(engine, formatter, parser, query)
                print(formatter.format(res.columns, res.rows))
                repl.update_with_result(res.query_metadata)
            except EOFError:
                # Ctrl-D exits the REPL.
                break
            except KeyboardInterrupt:
                # Ctrl-C aborts the current line but keeps the REPL alive.
                continue
            except QueryError as e:
                print('QueryError: {0}'.format(e))
            except Exception:
                # Bug fix: the original bare `except:` also swallowed
                # SystemExit/KeyboardInterrupt; limit the catch-all to
                # ordinary exceptions so the process can still be
                # terminated, while keeping the REPL running.
                traceback.print_exc()
def execute_query(engine, formatter, parser, query):
    """Parse *query*, run it on *engine*, and wrap the outcome in a
    QueryResult."""
    parsed, meta = parser.parse_query(query)
    cols, rows = engine.execute(parsed, meta)
    return QueryResult(
        parsed_query=parsed,
        query_metadata=meta,
        columns=cols,
        rows=rows,
    )
| {
"repo_name": "lebinh/aq",
"path": "aq/__init__.py",
"copies": "1",
"size": "2686",
"license": "mit",
"hash": 6459277353202927000,
"line_mean": 30.2325581395,
"line_max": 101,
"alpha_frac": 0.6496649293,
"autogenerated": false,
"ratio": 3.9154518950437316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5065116824343732,
"avg_score": null,
"num_lines": null
} |
""" A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import sys
from matplotlib import rcParams, verbose
# Decide which Qt binding (and which PyQt4 string API version) to use,
# honoring: already-imported bindings > QT_API env var > rcParams.

# Available APIs.
QT_API_PYQT = 'PyQt4'  # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2'  # forced to Version 2 API
QT_API_PYSIDE = 'PySide'  # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5'  # use PyQt5 API; Version 2 with module shim

ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
           pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major QT version.  Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.

QT_API_ENV = os.environ.get('QT_API')

if rcParams['backend'] == 'Qt5Agg':
    QT_RC_MAJOR_VERSION = 5
elif rcParams['backend'] == 'Qt4Agg':
    QT_RC_MAJOR_VERSION = 4
else:
    # A different backend was specified, but we still got here because a Qt
    # related file was imported. This is allowed, so lets try and guess
    # what we should be using.
    if "PyQt4" in sys.modules or "PySide" in sys.modules:
        # PyQt4 or PySide is actually used.
        QT_RC_MAJOR_VERSION = 4
    else:
        # This is a fallback: PyQt5
        QT_RC_MAJOR_VERSION = 5

QT_API = None

# check if any binding is already imported, if so silently ignore the
# rcparams/ENV settings and use what ever is already imported.
if 'PySide' in sys.modules:
    # user has imported PySide before importing mpl
    QT_API = QT_API_PYSIDE

if 'PyQt4' in sys.modules:
    # user has imported PyQt4 before importing mpl
    # this case also handles the PyQt4v2 case as once sip is imported
    # the API versions can not be changed so do not try
    QT_API = QT_API_PYQT

if 'PyQt5' in sys.modules:
    # the user has imported PyQt5 before importing mpl
    QT_API = QT_API_PYQT5

if (QT_API_ENV is not None) and QT_API is None:
    try:
        QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
    except KeyError:
        raise RuntimeError(
            ('Unrecognized environment variable %r, valid values are:'
             ' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
    if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
        # Only if backend and env qt major version are
        # compatible use the env variable.
        QT_API = ETS[QT_API_ENV][0]

if QT_API is None:
    # No ETS environment or incompatible so use rcParams.
    if rcParams['backend'] == 'Qt5Agg':
        QT_API = rcParams['backend.qt5']
    elif rcParams['backend'] == 'Qt4Agg':
        QT_API = rcParams['backend.qt4']
    else:
        # A non-Qt backend was specified, no version of the Qt
        # bindings is imported, but we still got here because a Qt
        # related file was imported. This is allowed, fall back to Qt5
        # using which ever binding the rparams ask for.
        QT_API = rcParams['backend.qt5']

# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None

# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
    try:
        import sip
        _sip_imported = True
    except ImportError:
        # Try using PySide
        QT_API = QT_API_PYSIDE
        cond = ("Could not import sip; falling back on PySide\n"
                "in place of PyQt4 or PyQt5.\n")
        verbose.report(cond, 'helpful')

if _sip_imported:
    if QT_API == QT_API_PYQTv2:
        if QT_API_ENV == 'pyqt':
            cond = ("Found 'QT_API=pyqt' environment variable. "
                    "Setting PyQt4 API accordingly.\n")
        else:
            cond = "PyQt API v2 specified."
        # NOTE(review): the bare excepts below also swallow
        # KeyboardInterrupt; they predate this review and are kept as-is.
        try:
            sip.setapi('QString', 2)
        except:
            res = 'QString API v2 specification failed. Defaulting to v1.'
            verbose.report(cond + res, 'helpful')
            # condition has now been reported, no need to repeat it:
            cond = ""
        try:
            sip.setapi('QVariant', 2)
        except:
            res = 'QVariant API v2 specification failed. Defaulting to v1.'
            verbose.report(cond + res, 'helpful')

    if QT_API == QT_API_PYQT5:
        try:
            from PyQt5 import QtCore, QtGui, QtWidgets
            _getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
        except ImportError:
            # fell through, tried PyQt5, failed fall back to PyQt4
            QT_API = rcParams['backend.qt4']
            QT_RC_MAJOR_VERSION = 4

    # needs to be if so we can re-test the value of QT_API which may
    # have been changed in the above if block
    if QT_API in [QT_API_PYQT, QT_API_PYQTv2]:  # PyQt4 API
        from PyQt4 import QtCore, QtGui

        try:
            if sip.getapi("QString") > 1:
                # Use new getSaveFileNameAndFilter()
                _getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
            else:
                # Use old getSaveFileName()
                def _getSaveFileName(*args, **kwargs):
                    return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
                            None)
        except (AttributeError, KeyError):
            # call to getapi() can fail in older versions of sip
            def _getSaveFileName(*args, **kwargs):
                return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None

    try:
        # Alias PyQt-specific functions for PySide compatibility.
        QtCore.Signal = QtCore.pyqtSignal
        try:
            QtCore.Slot = QtCore.pyqtSlot
        except AttributeError:
            # Not a perfect match but works in simple cases
            QtCore.Slot = QtCore.pyqtSignature
        QtCore.Property = QtCore.pyqtProperty
        __version__ = QtCore.PYQT_VERSION_STR
    except NameError:
        # QtCore did not get imported, fall back to pyside
        QT_API = QT_API_PYSIDE

if QT_API == QT_API_PYSIDE:  # try importing pyside
    try:
        from PySide import QtCore, QtGui, __version__, __version_info__
    except ImportError:
        raise ImportError(
            "Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
            "or PySide package to be installed, but it was not found.")

    if __version_info__ < (1, 0, 3):
        raise ImportError(
            "Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")

    _getSaveFileName = QtGui.QFileDialog.getSaveFileName

# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
    '''Import all used QtGui objects into QtWidgets
    Here I've opted to simple copy QtGui into QtWidgets as that
    achieves the same result as copying over the objects, and will
    continue to work if other objects are used.
    '''
    QtWidgets = QtGui
| {
"repo_name": "marcsans/cnn-physics-perception",
"path": "phy/lib/python2.7/site-packages/matplotlib/backends/qt_compat.py",
"copies": "4",
"size": "7088",
"license": "mit",
"hash": 3804414104428006400,
"line_mean": 35.5360824742,
"line_max": 79,
"alpha_frac": 0.6273984199,
"autogenerated": false,
"ratio": 3.634871794871795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006443298969072165,
"num_lines": 194
} |
""" A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib import rcParams, verbose
# Decide which Qt binding to use: QT_API env var (when its major version
# matches the configured backend) takes precedence over rcParams.

# Available APIs.
QT_API_PYQT = 'PyQt4'  # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2'  # forced to Version 2 API
QT_API_PYSIDE = 'PySide'  # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5'  # use PyQt5 API; Version 2 with module shim

ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
           pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major QT version.  Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.

QT_API_ENV = os.environ.get('QT_API')

if rcParams['backend'] == 'Qt5Agg':
    QT_RC_MAJOR_VERSION = 5
else:
    QT_RC_MAJOR_VERSION = 4

QT_API = None

if (QT_API_ENV is not None):
    try:
        QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
    except KeyError:
        raise RuntimeError(
            ('Unrecognized environment variable %r, valid values are:'
             ' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
    if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
        # Only if backend and env qt major version are
        # compatible use the env variable.
        QT_API = ETS[QT_API_ENV][0]

if QT_API is None:
    # No ETS environment or incompatible so use rcParams.
    if rcParams['backend'] == 'Qt5Agg':
        QT_API = rcParams['backend.qt5']
    else:
        QT_API = rcParams['backend.qt4']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None

# Flag to check if sip could be imported
_sip_imported = False

# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
    try:
        import sip
        _sip_imported = True
    except ImportError:
        # Try using PySide
        QT_API = QT_API_PYSIDE

if _sip_imported:
    if QT_API == QT_API_PYQTv2:
        if QT_API_ENV == 'pyqt':
            cond = ("Found 'QT_API=pyqt' environment variable. "
                    "Setting PyQt4 API accordingly.\n")
        else:
            cond = "PyQt API v2 specified."
        # NOTE(review): the bare excepts below also swallow
        # KeyboardInterrupt; kept as-is to preserve behavior.
        try:
            sip.setapi('QString', 2)
        except:
            res = 'QString API v2 specification failed. Defaulting to v1.'
            verbose.report(cond + res, 'helpful')
            # condition has now been reported, no need to repeat it:
            cond = ""
        try:
            sip.setapi('QVariant', 2)
        except:
            res = 'QVariant API v2 specification failed. Defaulting to v1.'
            verbose.report(cond + res, 'helpful')

    if QT_API in [QT_API_PYQT, QT_API_PYQTv2]:  # PyQt4 API
        from PyQt4 import QtCore, QtGui
        try:
            if sip.getapi("QString") > 1:
                # Use new getSaveFileNameAndFilter()
                _getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
            else:
                # Use old getSaveFileName()
                def _getSaveFileName(*args, **kwargs):
                    return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
                            None)
        except (AttributeError, KeyError):
            # call to getapi() can fail in older versions of sip
            def _getSaveFileName(*args, **kwargs):
                return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
    else:  # PyQt5 API
        from PyQt5 import QtCore, QtGui, QtWidgets
        _getSaveFileName = QtWidgets.QFileDialog.getSaveFileName

    # Alias PyQt-specific functions for PySide compatibility.
    QtCore.Signal = QtCore.pyqtSignal
    try:
        QtCore.Slot = QtCore.pyqtSlot
    except AttributeError:
        # Not a perfect match but works in simple cases
        QtCore.Slot = QtCore.pyqtSignature
    QtCore.Property = QtCore.pyqtProperty
    __version__ = QtCore.PYQT_VERSION_STR

else:  # try importing pyside
    from PySide import QtCore, QtGui, __version__, __version_info__
    if __version_info__ < (1, 0, 3):
        raise ImportError(
            "Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
    _getSaveFileName = QtGui.QFileDialog.getSaveFileName

# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
    '''Import all used QtGui objects into QtWidgets
    Here I've opted to simple copy QtGui into QtWidgets as that
    achieves the same result as copying over the objects, and will
    continue to work if other objects are used.
    '''
    QtWidgets = QtGui
| {
"repo_name": "uglyboxer/linear_neuron",
"path": "net-p3/lib/python3.5/site-packages/matplotlib/backends/qt_compat.py",
"copies": "10",
"size": "4816",
"license": "mit",
"hash": -8200818618592231000,
"line_mean": 32.6783216783,
"line_max": 79,
"alpha_frac": 0.6220930233,
"autogenerated": false,
"ratio": 3.575352635486266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9197445658786266,
"avg_score": null,
"num_lines": null
} |
""" A Qt API selector that can be used to switch between PyQt and PySide.
"""
import os
from matplotlib import rcParams, verbose
# Decide which Qt4 binding to use: the QT_API env var wins, otherwise
# fall back to the rcParams setting.

# Available APIs.
QT_API_PYQT = 'PyQt4'  # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2'  # forced to Version 2 API
QT_API_PYSIDE = 'PySide'  # only supports Version 2 API

ETS = dict(pyqt=QT_API_PYQTv2, pyside=QT_API_PYSIDE)

# If the ETS QT_API environment variable is set, use it.  Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.
QT_API_ENV = os.environ.get('QT_API')
if QT_API_ENV is not None:
    try:
        QT_API = ETS[QT_API_ENV]
    except KeyError:
        raise RuntimeError(
            'Unrecognized environment variable %r, valid values are: %r or %r' %
            (QT_API_ENV, 'pyqt', 'pyside'))
else:
    # No ETS environment, so use rcParams.
    QT_API = rcParams['backend.qt4']

# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2):
    import sip
    if QT_API == QT_API_PYQTv2:
        if QT_API_ENV == 'pyqt':
            cond = ("Found 'QT_API=pyqt' environment variable. "
                    "Setting PyQt4 API accordingly.\n")
        else:
            cond = "PyQt API v2 specified."
        try:
            sip.setapi('QString', 2)
        except:
            res = 'QString API v2 specification failed. Defaulting to v1.'
            verbose.report(cond + res, 'helpful')
            # condition has now been reported, no need to repeat it:
            cond = ""
        try:
            sip.setapi('QVariant', 2)
        except:
            res = 'QVariant API v2 specification failed. Defaulting to v1.'
            verbose.report(cond + res, 'helpful')

    from PyQt4 import QtCore, QtGui

    # Alias PyQt-specific functions for PySide compatibility.
    QtCore.Signal = QtCore.pyqtSignal
    try:
        QtCore.Slot = QtCore.pyqtSlot
    except AttributeError:
        # Bug fix: this fallback previously assigned the bare name
        # `pyqtSignature`, which is undefined at module scope and would
        # raise NameError on PyQt4 builds lacking pyqtSlot; the
        # decorator lives on QtCore.  Not a perfect match but works in
        # simple cases.
        QtCore.Slot = QtCore.pyqtSignature
    QtCore.Property = QtCore.pyqtProperty
    __version__ = QtCore.PYQT_VERSION_STR

    try:
        if sip.getapi("QString") > 1:
            # Use new getSaveFileNameAndFilter()
            _get_save = QtGui.QFileDialog.getSaveFileNameAndFilter
        else:
            # Use old getSaveFileName()
            _getSaveFileName = QtGui.QFileDialog.getSaveFileName
    except (AttributeError, KeyError):
        # call to getapi() can fail in older versions of sip
        _getSaveFileName = QtGui.QFileDialog.getSaveFileName
else:  # can only be pyside
    from PySide import QtCore, QtGui, __version__, __version_info__
    if __version_info__ < (1, 0, 3):
        raise ImportError(
            "Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
    _get_save = QtGui.QFileDialog.getSaveFileName

if _getSaveFileName is None:
    # Normalize the (filename, filter) tuple returned by PySide /
    # getSaveFileNameAndFilter down to just the filename, matching the
    # old getSaveFileName signature.
    def _getSaveFileName(self, msg, start, filters, selectedFilter):
        return _get_save(self, msg, start, filters, selectedFilter)[0]
| {
"repo_name": "lthurlow/Network-Grapher",
"path": "proj/external/matplotlib-1.2.1/lib/matplotlib/backends/qt4_compat.py",
"copies": "12",
"size": "3172",
"license": "mit",
"hash": -7093282470855796000,
"line_mean": 33.4782608696,
"line_max": 79,
"alpha_frac": 0.6261034048,
"autogenerated": false,
"ratio": 3.6585928489042674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" A Qt API selector that can be used to switch between PyQt and PySide.
This uses the ETS 4.0 selection pattern of:
PySide first, PyQt with API v2. second.
Do not use this if you need PyQt with the old QString/QVariant API.
"""
import os
import sys
# Available APIs.
QT_API_PYQT = 'pyqt'
QT_API_PYSIDE = 'pyside'
def prepare_pyqt4():
    """Switch PyQt4 to the v2 string APIs for PySide compatibility.

    Selects the new-style string API that automatically converts
    QStrings to Unicode Python strings and automatically unpacks
    QVariants to their underlying objects. Must be called before PyQt4
    itself is imported.
    """
    import sip
    sip.setapi('QString', 2)
    sip.setapi('QVariant', 2)
# Select Qt binding, using the QT_API environment variable if available.
QT_API = os.environ.get('QT_API')
if QT_API is None:
    # No preference given: try PySide first, then PyQt4 with API v2.
    try:
        import PySide
        if PySide.__version__ < '1.0.3':
            # old PySide, fallback on PyQt
            raise ImportError
        from PySide import QtCore, QtGui, QtNetwork
        QT_API = QT_API_PYSIDE
    except ImportError:
        try:
            prepare_pyqt4()
            import PyQt4
            from PyQt4 import QtCore, QtGui, QtNetwork
            if QtCore.PYQT_VERSION_STR < '4.7':
                # PyQt 4.6 has issues with null strings returning as None
                raise ImportError
            QT_API = QT_API_PYQT
        except ImportError:
            raise ImportError('Cannot import PySide >= 1.0.3 or PyQt4 >= 4.7')
elif QT_API == QT_API_PYQT:
    # Note: This must be called *before* PyQt4 is imported.
    prepare_pyqt4()

# Now perform the imports.
if QT_API == QT_API_PYQT:
    from PyQt4 import QtCore, QtGui, QtNetwork
    if QtCore.PYQT_VERSION_STR < '4.7':
        raise ImportError("IPython requires PyQt4 >= 4.7, found %s"%QtCore.PYQT_VERSION_STR)
    # Alias PyQt-specific functions for PySide compatibility.
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
elif QT_API == QT_API_PYSIDE:
    import PySide
    if PySide.__version__ < '1.0.3':
        raise ImportError("IPython requires PySide >= 1.0.3, found %s"%PySide.__version__)
    from PySide import QtCore, QtGui, QtNetwork
else:
    raise RuntimeError('Invalid Qt API %r, valid values are: %r or %r' %
                       (QT_API, QT_API_PYQT, QT_API_PYSIDE))

# Publish the chosen binding's modules under the stable 'qt.*' names so
# client code can import them without caring which binding was picked.
sys.modules['qt.QtCore'] = QtCore
sys.modules['qt.QtGui'] = QtGui
sys.modules['qt.QtNetwork'] = QtNetwork
| {
"repo_name": "Answeror/lit",
"path": "qt.py",
"copies": "1",
"size": "2364",
"license": "mit",
"hash": -7866494565222801000,
"line_mean": 31.8333333333,
"line_max": 92,
"alpha_frac": 0.6476311337,
"autogenerated": false,
"ratio": 3.497041420118343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9636303511412299,
"avg_score": 0.0016738084812088609,
"num_lines": 72
} |
""" A Qt based color gradient editor for vtkLookupTables and
color transfer functions.
This code is distributed under the conditions of the BSD license.
Based on a wxPython by Pete Schmitt <schmitt@colorado.edu>
Author: Prabhu Ramachandran <prabhu@enthought.com>
Copyright (c) 2012-2013 Enthought, Inc., Mumbai, India.
"""
# Enthought library imports
from pyface.qt import QtCore, QtGui
# Local imports
from .gradient_editor import (ColorControlPoint, ChannelBase, FunctionControl,
GradientEditorWidget)
##########################################################################
# `QGradientControl` class.
##########################################################################
class QGradientControl(QtGui.QWidget):
    """Widget which displays the gradient represented by a GradientTable
    object (and does nothing beyond that)."""
    def __init__(self, parent, gradient_table, width, height):
        """master: panel in which to place the control. GradientTable is the
        Table to which to attach."""
        super(QGradientControl, self).__init__(parent=parent)
        self.resize(width, height)
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
        self.width = width
        self.height = height
        self.gradient_table = gradient_table
        # currently only able to use gradient tables in the same size as the
        # canvas width
        assert( gradient_table.size == width )
        self.setMinimumSize(100, 50)

    def paintEvent(self, event):
        """Paint handler.

        Draws the gradient column by column.  When the table has a scaling
        function, the original gradient is painted in the lower half and the
        scaled gradient in the upper half of the widget.
        """
        super(QGradientControl, self).paintEvent(event)
        painter = QtGui.QPainter(self)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        painter.setBrush(brush)
        painter.setBackgroundMode(QtCore.Qt.OpaqueMode)
        sz = self.size()
        width, height = sz.width(), sz.height()
        xform = self.gradient_table.scaling_function
        start_y = 0
        end_y = height
        if xform:
            # if a scaling transformation is provided, paint the original
            # gradient under the scaled gradient.
            # BUGFIX: integer division -- Qt's int drawLine overload rejects
            # the float produced by height/2 under Python 3.
            start_y = height // 2
        # paint the original gradient as it stands in the table.
        color = QtGui.QColor()
        for x in range(width):
            (r, g, b, a) = self.gradient_table.get_pos_rgba_color_lerped(float(x)/(width-1))
            color.setRgb(int(255*r), int(255*g), int(255*b))
            painter.setPen(color)
            brush.setColor(color)
            painter.drawLine(x, start_y, x, end_y)
        if xform:
            # paint the scaled gradient below
            end_y = start_y
            start_y = 0
            for x in range(width):
                f = float(x)/(width-1)
                (r, g, b, a) = self.gradient_table.get_pos_rgba_color_lerped(xform(f))
                # BUGFIX: QColor has no ``set`` method -- the original called
                # color.set(...), raising AttributeError on this code path.
                # Use setRgb, and update the pen as in the loop above so the
                # line is actually drawn in the new color.
                color.setRgb(int(255*r), int(255*g), int(255*b))
                painter.setPen(color)
                brush.setColor(color)
                painter.drawLine(x, start_y, x, end_y)
##########################################################################
# `Channel` class.
##########################################################################
class Channel(ChannelBase):
    def paint(self, painter):
        """Paint current channel into Canvas (a canvas of a function control
        object).

        Contents of the canvas are not deleted prior to painting,
        so more than one channel can be painted into the same canvas.
        """
        table = self.control.table
        # only control points which are active for the current channel
        # are to be painted. filter them out.
        relevant_control_points = [
            x for x in table.control_points if self.name in x.active_channels
        ]
        # lines between control points
        color = QtGui.QColor(*self.rgb_color)
        painter.setPen(color)
        brush = QtGui.QBrush(color)
        painter.setBrush(brush)
        painter.setBackgroundMode(QtCore.Qt.OpaqueMode)
        for k in range(len(relevant_control_points) - 1):
            cur_point = relevant_control_points[k]
            next_point = relevant_control_points[1+k]
            painter.drawLine(self.get_pos_index(cur_point.pos),
                             self.get_value_index(cur_point.color),
                             self.get_pos_index(next_point.pos),
                             self.get_value_index(next_point.color))
        # control points themselves, drawn as small black squares centered
        # on the point.
        color = QtCore.Qt.black
        painter.setPen(color)
        radius = 6
        for control_point in relevant_control_points:
            x = self.get_pos_index(control_point.pos)
            y = self.get_value_index(control_point.color)
            # BUGFIX: use integer coordinates -- radius/2.0 produced floats,
            # which the int QPainter.drawRect overload rejects.
            painter.drawRect(x - radius // 2, y - radius // 2, radius, radius)
        # BUGFIX: removed leftover debug call ``painter.drawRect(100,80,6,6)``
        # which painted a stray square at a fixed position on every repaint.
##########################################################################
# `QFunctionControl` class.
##########################################################################
class QFunctionControl(QtGui.QWidget, FunctionControl):
    """Widget which displays a rectangular regions on which hue, sat, val
    or rgb values can be modified. An function control can have one or more
    attached color channels."""
    # Radius around a control point center in which we'd still count a
    # click as "clicked the control point"
    control_pt_click_tolerance = 4
    # Factory used by FunctionControl to build per-channel painters.
    ChannelFactory = Channel
    def __init__(self, master, gradient_table, color_space, width, height):
        """Initialize a function control widget on tkframe master.
        Parameters:
        -----------
        master: The master widget.  Note that this widget *must* have
             the methods specified in the `AbstractGradientEditorWidget`
             interface.
        on_table_changed: Callback function taking a bool argument of meaning
            'FinalUpdate'. FinalUpdate is true if a control point is dropped,
            created or removed and false if the update is due to a control point
            currently beeing dragged (but not yet dropped)
        color_space: String which specifies the channels painted on this control.
             May be any combination of h,s,v,r,g,b,a in which each channel
             occurs only once.
        set_status_text: a callback used to set the status text
             when using the editor.
        """
        # Initialize the non-Qt machinery first so channel state exists
        # before Qt can trigger a paint.
        FunctionControl.__init__(self, master, gradient_table, color_space,
                                 width, height)
        QtGui.QWidget.__init__(self, parent=master)
        self.resize(width, height)
        self.setMinimumSize(100, 50)

    ######################################################################
    # Qt event handlers.
    ######################################################################
    def paintEvent(self, event):
        # White background, then each active channel paints its curve and
        # control points on top.
        super(QFunctionControl, self).paintEvent(event)
        painter = QtGui.QPainter(self)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        painter.setBrush(brush)
        width, height = self.size().width(), self.size().height()
        painter.drawRect(0, 0, width, height)
        for channel in self.channels:
            channel.paint(painter)

    def mousePressEvent(self, event):
        # Left press starts a drag if a control point is under the cursor;
        # cur_drag is a (channel, point) pair or None.
        if event.button() == QtCore.Qt.LeftButton:
            self.cur_drag = self.find_control_point(event.x(), event.y())
        super(QFunctionControl, self).mousePressEvent(event)

    def mouseReleaseEvent(self, event):
        # Left release ends a drag (final update); right click toggles a
        # control point under the cursor.
        if event.button() == QtCore.Qt.LeftButton:
            if self.cur_drag:
                self.table_config_changed( final_update = True )
                self.cur_drag = None
        elif event.button() == QtCore.Qt.RightButton:
            # toggle control point. check if there is a control point
            # under the mouse. If yes, delete it, if not, create one
            # at that point.
            cur_control_point = self.find_control_point(event.x(), None)
            if cur_control_point:
                # found a marker at the click position. delete it and return,
                # unless it is a fixed marker (at pos 0 or 1)..
                if ( cur_control_point[1].fixed ):
                    # in this case do nothing. Fixed markers cannot be deleted.
                    return
                self.table.control_points.remove(cur_control_point[1])
                self.table_config_changed(final_update=True)
            else:
                # since there was no marker to remove at the point, we assume
                # that we should place one there
                new_control_point = ColorControlPoint(active_channels=self.active_channels_string)
                new_control_point.set_pos(self.channels[0].get_index_pos(event.x()))
                # set new control point color to the color currently present
                # at its designated position
                new_control_point.color = self.table.get_pos_color(new_control_point.pos)
                self.table.insert_control_point(new_control_point)
                self.table_config_changed(final_update = True)
        if isinstance(event, QtGui.QMouseEvent):
            super(QFunctionControl, self).mouseReleaseEvent(event)

    def leaveEvent(self, event):
        # Leaving the widget mid-drag commits the drag as a final update.
        if self.cur_drag:
            self.table_config_changed( final_update = True )
            self.cur_drag = None
        super(QFunctionControl, self).leaveEvent(event)

    def resizeEvent(self, event):
        # Track the widget size; channels use self.width/height for mapping
        # between screen and table coordinates.
        sz = self.size()
        self.width = sz.width()
        self.height = sz.height()

    def mouseMoveEvent(self, event):
        # currently dragging a control point?
        channel = None
        point = None
        if self.cur_drag:
            channel = self.cur_drag[0]
            point = self.cur_drag[1]
            if ( not point.fixed ):
                point.set_pos( channel.get_index_pos(event.x()) )
                point.activate_channels( self.active_channels_string )
                self.table.sort_control_points()
            channel.set_value_index( point.color, event.y() )
            # Not a final update: the point is still being dragged.
            self.table_config_changed( final_update = False )
        screenX = event.x()
        screenY = event.y()
        width, height = self.size().width(), self.size().height()
        master = self.master
        s1, s2 = master.get_table_range()
        # Status line: show the dragged point's position/value, or the plain
        # cursor position when not dragging.
        if channel is not None:
            name = self.text_map[channel.name]
            pos = s1 + (s2 - s1)*point.pos
            val = channel.get_value(point.color)
            txt = '%s: (%.3f, %.3f)'%(name, pos, val)
        else:
            x = s1 + (s2 - s1)*float(screenX)/(width-1)
            y = 1.0 - float(screenY)/(height-1)
            txt = "position: (%.3f, %.3f)"%(x, y)
        self.master.set_status_text(txt)
##########################################################################
# `QGradientEditorWidget` class.
##########################################################################
class QGradientEditorWidget(QtGui.QWidget, GradientEditorWidget):
    """A Gradient Editor widget that can be used anywhere.
    """
    def __init__(self, master, vtk_table, on_change_color_table=None,
                 colors=None):
        """
        Parameters:
        -----------
        vtk_table : the `tvtk.LookupTable` or `tvtk.VolumeProperty` object
                    to set.
        on_change_color_table : A callback called when the color table
                                changes.
        colors : list of 'rgb', 'hsv', 'h', 's', 'v', 'a'
                 (Default : ['rgb', 'hsv', 'a'])
                 'rgb' creates one panel to edit Red, Green and Blue
                 colors.
                 'hsv' creates one panel to edit Hue, Saturation and
                 Value.
                 'h', 's', 'v', 'r', 'g', 'b', 'a' separately
                 specified creates different panels for each.
        """
        # Initialize the toolkit-independent part first; it sets up the
        # gradient table and the preview/channel geometry attributes read
        # below.
        GradientEditorWidget.__init__(self, master, vtk_table,
                                      on_change_color_table, colors)
        QtGui.QWidget.__init__(self, master)
        gradient_preview_width = self.gradient_preview_width
        gradient_preview_height = self.gradient_preview_height
        channel_function_width = self.channel_function_width
        channel_function_height = self.channel_function_height
        # set up all the panels in a grid: one row for the preview, one per
        # requested color panel, and a final row for the status text.
        grid = QtGui.QGridLayout()
        grid.setColumnStretch(0, 0)
        grid.setColumnStretch(1, 1)
        # "Gradient Viewer" panel, in position (0,1) for sizer
        self.gradient_control = QGradientControl(self,
                                                 self.gradient_table,
                                                 gradient_preview_width,
                                                 gradient_preview_height)
        self.setToolTip('Right click for menu')
        grid.addWidget(QtGui.QLabel("", self), 0, 0)
        grid.addWidget(self.gradient_control, 0, 1)
        # Setup the context menu to fire for the Gradient control alone.
        gc = self.gradient_control
        gc.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        gc.customContextMenuRequested.connect(self.contextMenuEventOnGradient)
        # Add the function controls:
        function_controls = self.function_controls
        editor_data = self.editor_data
        row = 1
        for color in self.colors:
            data = editor_data[color]
            control = QFunctionControl(self, self.gradient_table, color,
                                       channel_function_width,
                                       channel_function_height)
            txt = data[0] + self.tooltip_text
            control.setToolTip(txt)
            # Add name of editor (to left side of editor)
            grid.addWidget(QtGui.QLabel(data[1], self), row, 0)
            # Add the "RGB" control point editor
            grid.addWidget(control, row, 1)
            function_controls.append(control)
            row += 1
        # The status text.
        self.text = QtGui.QLabel('status', self)
        grid.addWidget(self.text, row, 0, 1, 2)
        self.setLayout(grid)
        self.show()

    ######################################################################
    # `GradientEditorWidget` interface.
    ######################################################################
    def set_status_text(self, msg):
        """Display `msg` in the status label at the bottom of the widget."""
        self.text.setText(msg)

    ######################################################################
    # Qt event methods.
    ######################################################################
    def contextMenuEventOnGradient(self, pos):
        """Show the Save/Load context menu over the gradient preview."""
        menu = QtGui.QMenu(self)
        saveAction = menu.addAction("Save as")
        loadAction = menu.addAction("Load")
        action = menu.exec_(self.mapToGlobal(pos))
        if action == saveAction:
            self.on_save()
        elif action == loadAction:
            self.on_load()

    def on_save(self, event=None):
        """
        Open "Save" dialog, write lookuptable to 3 files: ``*.lut``
        (lookuptable) ``*.grad`` (gradient table for use with this program),
        and ``*.jpg`` (image of the gradient)
        """
        wildcard = "Gradient Files (*.grad);;All Files (*.*)"
        # FIX: renamed local from ``filter`` to avoid shadowing the builtin.
        filename, selected_filter = QtGui.QFileDialog.getSaveFileName(self,
                                        "Save LUT to...",
                                        '',
                                        wildcard)
        if filename:
            self.save(filename)

    def on_load(self, event=None):
        """
        Load a ``*.grad`` lookuptable file.
        """
        wildcard = "Gradient Files (*.grad);;All Files (*.*)"
        # FIX: renamed local from ``filter`` to avoid shadowing the builtin.
        filename, selected_filter = QtGui.QFileDialog.getOpenFileName(self,
                                        "Open gradient file...",
                                        '',
                                        wildcard)
        if filename:
            self.load(filename)
##########################################################################
# `QGradientEditor` class.
##########################################################################
class QGradientEditor(QtGui.QMainWindow):
    """ QMainWindow that displays the gradient editor window,
    i.e. the thing that contains the gradient display, the function
    controls and the buttons.
    """
    def __init__(self, vtk_table, on_change_color_table = None, colors=None):
        """Initialize the gradient editor window.
        Parameters
        ----------
        vtk_table: Instance of vtkLookupTable, designating the table which is
            to be edited.
        on_change_color_table: Callback function taking no arguments. Called
            when the color table was changed and rendering is
            requested.
        """
        super(QGradientEditor, self).__init__()
        self.setWindowTitle("Color Gradient Editor")
        # The actual editor widget does all the work; this window only adds
        # menus and a status bar around it.
        self.widget = QGradientEditorWidget(self, vtk_table,
                                            on_change_color_table,
                                            colors)
        self.setCentralWidget(self.widget)
        self.resize(300, 500)
        self.statusBar()
        ## Set up the MenuBar
        # File menu: Save/Load delegate to the editor widget; Quit exits the
        # application.
        menu = self.menuBar()
        file_menu = menu.addMenu("&File")
        file_action = QtGui.QAction("&Save", self)
        file_action.setStatusTip("Save CTF")
        file_action.triggered.connect(self.widget.on_save)
        file_menu.addAction(file_action)
        load_action = QtGui.QAction("&Load", self)
        load_action.setStatusTip("Load CTF")
        load_action.triggered.connect(self.widget.on_load)
        file_menu.addAction(load_action)
        quit_action = QtGui.QAction("&Quit", self)
        quit_action.setStatusTip("Quit application")
        quit_action.triggered.connect(QtGui.QApplication.instance().quit)
        file_menu.addAction(quit_action)
        # Help menu: mouse-interaction help and an about box.
        help_menu = menu.addMenu("&Help")
        action = QtGui.QAction("&Help", self)
        action.setStatusTip("Help")
        action.triggered.connect(self.on_help)
        help_menu.addAction(action)
        action = QtGui.QAction("&About", self)
        action.setStatusTip("About application")
        action.triggered.connect(self.on_about)
        help_menu.addAction(action)

    def on_help(self, event=None):
        """ Help defining the mouse interactions """
        message = "Right click to add control points. Left click to move control points"
        QtGui.QMessageBox.information(self, 'Help', message)

    def on_about(self, event=None):
        """ Who wrote the program?"""
        message = 'tk Gradient Editor for MayaVi1: Gerald Knizia (cgk.d@gmx.net)\n'\
                  'wxPython port: Pete Schmitt (schmitt@colorado.edu)\n'\
                  'Qt port: Prabhu Ramachandran\n'\
                  'Enhanced for Mayavi2: Prabhu Ramachandran'
        QtGui.QMessageBox.information(self, 'About gradient editor', message)
def main():
    """Interactive demo: open the gradient editor on a generated test table."""
    from .traitsui_gradient_editor import make_test_table
    import sys
    table, ctf, otf = make_test_table(lut=False)
    # the actual gradient editor code.
    def on_color_table_changed():
        """If we had a vtk window running, update it here"""
        print("Update Render Window")
    # BUGFIX: QApplication.instance() returns None when no application has
    # been created yet (the usual case when run as a script), and the
    # original then crashed on app.exec_().  Create one on demand.
    app = QtGui.QApplication.instance()
    if app is None:
        app = QtGui.QApplication(sys.argv)
    editor = QGradientEditor(table,
                             on_color_table_changed,
                             colors=['rgb', 'a', 'h', 's', 'v'],
                             )
    editor.setWindowTitle("Gradient editor")
    editor.show()
    sys.exit(app.exec_())

##########################################################################
# Test application.
##########################################################################
if __name__ == "__main__":
    main()
| {
"repo_name": "dmsurti/mayavi",
"path": "tvtk/util/qt_gradient_editor.py",
"copies": "1",
"size": "19827",
"license": "bsd-3-clause",
"hash": 1920206198132523000,
"line_mean": 39.3808553971,
"line_max": 98,
"alpha_frac": 0.5411307813,
"autogenerated": false,
"ratio": 4.416796613945199,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026978835460569448,
"num_lines": 491
} |
"""A QT widget to create the buttons for the i3Void QT window.
"""
from PyQt5.QtWidgets import QApplication, QColorDialog, QWidget, QPushButton, QHBoxLayout, QVBoxLayout
from PyQt5.QtGui import QColor
class ButtonWidget(QWidget):
    """A QT Widget that:
       1) Instantiates and Styles the "Close" and "Color" buttons
       2) Creates a color picker when the Color button is clicked
    """
    def __init__(self, parent=None):
        """Build the Color/Close buttons, wire their click handlers, and
        lay them out in the top-right corner of the widget.
        """
        QWidget.__init__(self, parent)
        self._parent = parent
        self._current_color_hex = "#2e2e2e"

        self.color_button = QPushButton('Color', self)
        self.color_button.clicked.connect(self.showColorPicker)
        self.color_button.resize(self.color_button.sizeHint())

        self.close_button = QPushButton(' X ', self)
        self.close_button.clicked.connect(self._parent.closeWindow)
        self.close_button.resize(self.close_button.sizeHint())

        # Push the buttons to the right, then the whole row to the top.
        button_row = QHBoxLayout()
        button_row.addStretch(1)
        button_row.addWidget(self.color_button)
        button_row.addWidget(self.close_button)
        outer = QVBoxLayout()
        outer.addLayout(button_row)
        outer.addStretch(1)
        self.setLayout(outer)
        self.repaint()

    def showColorPicker(self):
        """Open a color dialog seeded with the current color; on accept,
        remember the chosen color and forward it to the parent window.
        """
        initial = QColor(0, 0, 0)
        initial.setNamedColor(self._current_color_hex)
        chosen = QColorDialog.getColor(initial)
        if chosen.isValid():
            self._current_color_hex = chosen.name()
            self._parent.setWindowColor(self._current_color_hex)
| {
"repo_name": "destinmoulton/i3void",
"path": "gui/buttonwidget.py",
"copies": "1",
"size": "1710",
"license": "mit",
"hash": 4273235093330475000,
"line_mean": 30.6666666667,
"line_max": 102,
"alpha_frac": 0.6444444444,
"autogenerated": false,
"ratio": 3.8952164009111616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5039660845311161,
"avg_score": null,
"num_lines": null
} |
"""A QT widget to create the buttons for the i3Void QT window.
"""
from PyQt5.QtWidgets import QColorDialog, QWidget, QPushButton, QVBoxLayout, QHBoxLayout
from PyQt5.QtGui import QColor
class ButtonWidget(QWidget):
    """A QT Widget holding the "Color" and "Close" buttons.

    NOTE(review): near-duplicate of the ButtonWidget in gui/buttonwidget.py;
    consider consolidating into one class.
    """
    def __init__(self, parent=None):
        """Initialize the buttons, their click event handlers, and their position.
        """
        QWidget.__init__(self, parent)
        self._parent = parent
        # Default window color shown in the picker before any selection.
        self._current_color_hex = "#2e2e2e"
        self.color_button = QPushButton('Color', self)
        self.color_button.clicked.connect(self.showColorPicker)
        self.color_button.resize(self.color_button.sizeHint())
        self.close_button = QPushButton(' X ', self)
        self.close_button.clicked.connect(self._parent.closeWindow)
        self.close_button.resize(self.close_button.sizeHint())
        # Buttons pushed to the right edge, row pinned to the top.
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        hbox.addWidget(self.color_button)
        hbox.addWidget(self.close_button)
        vbox = QVBoxLayout()
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        self.setLayout(vbox)
        self.repaint()

    def showColorPicker(self):
        """Show the color picker when the Color button is clicked.
        """
        color = QColor(0, 0, 0)
        color.setNamedColor(self._current_color_hex)
        selected_color = QColorDialog.getColor(color)
        if selected_color.isValid():
            self._current_color_hex = selected_color.name()
            # Propagate the chosen color to the owning window.
            self._parent.setWindowColor(self._current_color_hex)
| {
"repo_name": "destinmoulton/i3void",
"path": "gui/fadingwidget.py",
"copies": "1",
"size": "1529",
"license": "mit",
"hash": -7635817469475519000,
"line_mean": 30.2040816327,
"line_max": 88,
"alpha_frac": 0.6442119032,
"autogenerated": false,
"ratio": 3.861111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5005323014311112,
"avg_score": null,
"num_lines": null
} |
"""aquam URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
# Core Django imports
from django.conf.urls import include, url
from django.contrib import admin
# Apps imports
import views as aquam_views
import apps.solutions.views as solutions_views
import apps.geoanalytics.views as geoanalytics_views
# URL routes for the aquam project.  Route names are referenced by callers
# (templates / reverse()), so renames here are breaking changes.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r"^$", aquam_views.index, name="index"),
    url(r"^index/$", aquam_views.index, name="index"),
    url(r"^solutions/$", aquam_views.solutions, name="solutions"),
    url(r"^geoanalytics/$", aquam_views.geoanalytics, name="geoanalytics"),
    url(r"^contact/$", aquam_views.contact, name="contact"),
    # Water Use Analyzer
    url(r"^solutions/water-use-analyzer/$", solutions_views.water_use_analyzer, name="water-use-analyzer"),
    url(r"^solutions/water-use-analyzer/get-water-use/$", solutions_views.get_water_use, name="get-water-use"),
    url(r"^solutions/water-use-analyzer/get-horizontal-length/$", solutions_views.get_horizontal_length,
        name="get-horizontal-length"),
    url(r"^solutions/water-use-analyzer/get-water-use-per-horizontal-foot/$",
        solutions_views.get_water_use_per_horizontal_foot, name="get-water-use-per-horizontal-foot"),
    # NOTE(review): name below is "get-annual-water" but the route and view
    # say "get-annual-water-use" -- likely a typo; renaming would break any
    # existing {% url %} references, so flagged only.
    url(r"^solutions/water-use-analyzer/get-annual-water-use/$", solutions_views.get_annual_water_use,
        name="get-annual-water"),
    # NOTE(review): "dilled" in the path below looks like a typo for
    # "drilled" (view and name both say "drilled"); fixing it changes the
    # public URL, so flagged only.
    url(r"^solutions/water-use-analyzer/get-annual-horizontal-feet-dilled/$",
        solutions_views.get_annual_horizontal_feet_drilled, name="get-annual-horizontal-feet-drilled"),
    url(r"^solutions/water-use-analyzer/get-annual-bbls-ft-metric/$", solutions_views.get_annual_bbls_ft_metric,
        name="get-annual-bbls-ft-metric"),
    # NOTE(review): the three fitting routes below use underscores and are
    # not anchored with a trailing '/$', unlike every other route here.
    url(r"^solutions/water-use-analyzer/get_linear_fitting", solutions_views.get_linear_fitting,
        name="get-linear-fitting"),
    url(r"^solutions/water-use-analyzer/get_quadratic_fitting", solutions_views.get_quadratic_fitting,
        name="get-quadratic-fitting"),
    url(r"^solutions/water-use-analyzer/get-cubic-fitting", solutions_views.get_cubic_fitting,
        name="get-cubic-fitting"),
    # Produced Water Modeler
    url(r"^solutions/produced-water-modeler/$", solutions_views.produced_water_modeler, name="produced-water-modeler"),
    url(r"^solutions/produced-water-modeler/get-arp-model/$", solutions_views.get_arp_model, name="get-arp-model"),
    url(r"^solutions/produced-water-modeler/get-arp-prediction/.*$", solutions_views.get_arp_prediction,
        name="get-arp-prediction"),
    # Water Quality Analyzer
    url(r"^solutions/water-quality-analyzer/$", solutions_views.water_quality_analyzer, name="water-quality-analyzer"),
    url(r"^solutions/water-quality-analyzer/get-water-quality-result/.*$", solutions_views.get_water_quality_result,
        name="get-water-quality-result"),
    url(r"^solutions/water-quality-analyzer/get-water-quality-settings/$",
        solutions_views.get_water_quality_settings, name="get-water-quality-settings"),
    # Water Treatment Analyzer
    url(r"^solutions/water-treatment-analyzer/$", solutions_views.water_treatment_analyzer,
        name="water-treatment-analyzer"),
    url(r"^solutions/water-treatment-analyzer/get-water-treatment-iteration-result/.*$",
        solutions_views.get_treatment_iteration_result, name="get-water-treatment-iteration-result"),
    url(r"^solutions/water-treatment-analyzer/get-water-treatment-general-settings/$",
        solutions_views.get_water_treatment_general_settings, name="get-water-treatment-general-settings"),
    url(r"^solutions/water-treatment-analyzer/get-water-treatment-location-settings/$",
        solutions_views.get_water_treatment_location_settings, name="get-water-treatment-location-settings"),
    # Water Use Geoanalyzer
    url(r"^geoanalytics/water-use-geoanalyzer/$", geoanalytics_views.water_use_geoanalyzer,
        name="water-use-geoanalyzer"),
    url(r"^geoanalytics/water-use-geoanalyzer/get-geo-water-use/$", geoanalytics_views.get_geo_water_use,
        name="get-geo-water-use"),
    # Transportation & Cost Optimizer
    url(r"^geoanalytics/transportation-cost-optimizer/$", geoanalytics_views.transportation_cost_optimizer,
        name="transportation-cost-optimizer")
]
| {
"repo_name": "tcqiuyu/aquam",
"path": "aquam/aquam/urls.py",
"copies": "1",
"size": "4806",
"license": "mit",
"hash": -6145140951771080000,
"line_mean": 55.5411764706,
"line_max": 119,
"alpha_frac": 0.7207657095,
"autogenerated": false,
"ratio": 3.3053645116918844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9499445769973497,
"avg_score": 0.005336890243677437,
"num_lines": 85
} |
"""A quantum tic tac toe running in command line"""
from qiskit import Aer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import CompositeGate
from qiskit import execute
import numpy as np
from composite_gates import cry,cnx,any_x,bus_or,x_bus
class Move():
    """A data structure for game moves.

    indices : list of [row, col] cells the move occupies (one or two).
    player  : 0 for X, 1 for O.
    q1, q2  : optional qubits tracking superposed placements.
    """
    def __init__(self, indices, player, q1=None, q2=None):
        self.indices = indices
        self.player = player
        self.q1 = q1
        self.q2 = q2

    def __str__(self):
        # Render as the list [indices, player, q1, q2] for quick debugging.
        fields = [self.indices, self.player, self.q1, self.q2]
        return str(fields)
class Board():
    def __init__(self,x,y,print_info=False):
        """Create an x-by-y quantum tic-tac-toe board.

        print_info controls whether run() prints the intro text.
        """
        #quantum register, classical register, quantum circuit.
        # NOTE(review): the register starts at 1 qubit and other methods
        # grow it by mutating self.q.size directly -- relies on qiskit
        # internals; verify against the qiskit version in use.
        self.print_info=print_info
        self.q = QuantumRegister(1)
        self.c = ClassicalRegister(1)
        self.qc = QuantumCircuit(self.q, self.c)
        # Attach the custom composite gates as circuit methods.
        self.qc.cry = cry
        self.qc.x_bus = x_bus
        self.qc.cnx = cnx
        self.qc.any_x = any_x
        self.qc.bus_or = bus_or
        #the dimensions of the board
        self.x=x
        self.y=y
        #To keep track of what is in each cell, no entanglement etc.
        #Provides a graphic of the game state.
        self.cells = np.empty((x,y),dtype=object)
        self.cells[:]='' #Initially game is empty.
        self.game_full = False
        # Chronological list of Move objects played so far.
        self.moves = []
def __str__(self):
return str(self.cells)
    def add_move(self,indices,player):
        """Adds a move if it is non-clashing, otherwise passes it on.

        indices: list of one or two [row, col] cells; player: 0 (X) or 1 (O).
        Returns 'ok' on success, or an error string ('Index out of range',
        'overfull') that the caller should display.
        """
        # Validate bounds before touching any game state.
        for index in indices:
            if index[0] >= self.x:
                return 'Index out of range'
            if index[1] >= self.y:
                return 'Index out of range'
        status = self._add_move(indices,player)
        if status=='ok':
            # Build the display label, e.g. 'X3' for X's third move.
            if player==0:
                char = 'X'
            elif player==1:
                char = 'O'
            char+=str(len(self.moves))
            # Mark every cell the (possibly superposed) move touches.
            for index in indices:
                s = self.cells[index[0],index[1]]
                if s: #If the cell has some text
                    #Add char with a comma
                    self.cells[index[0],index[1]]+=' '+char
                else: #cell is empty so just add char
                    self.cells[index[0],index[1]]+=char
            print(self.cells)
        return status
    def _add_move(self,indices,player):
        """Actually adds the move if not clashing,
        otherwise passes it to _add_clashing_move"""
        # A two-cell move on the same cell twice collapses to a single-cell
        # (classical) move.
        if len(indices)==2:
            if indices[0]==indices[1]:
                indices = [indices[0]]
        num=len(indices)
        caught_clashes = False #turns true if all moves are safe clashes
        for existing_move in self.moves:
            for index in indices:
                if index in existing_move.indices:
                    if len(existing_move.indices)==1:
                        return 'overfull'
                        #This move will ALWAYS be there, if it can.
                        #hence, overfull.
                    else:
                        #captures any clash
                        caught_clashes = True
        if caught_clashes:
            return self._add_clashing_move(indices,player)
        else:
            #Reach this section if there are no clashes at all
            if num==1:
                self.moves.append(Move(indices,player)) #No control needed
                return 'ok'
            else:
                # Superposed move: allocate two qubits in an equal
                # superposition of (q1=1,q2=0) and (q1=0,q2=1), one per cell.
                self.q.size+=2 #indicator qubit, and move qubit
                q1 = self.q[self.q.size-2] #To make this readable...
                q2 = self.q[self.q.size-1]
                self.qc.h(q1) #the last qubit in register.
                self.qc.x(q2)
                self.qc.cx(q1,q2)
                self.moves.append(Move(indices,player,q1,q2))
                return 'ok'
    def _add_clashing_move(self,indices,player):
        """Adds a clashing move.

        The new move's qubits are anti-correlated with the qubits of every
        existing move that could occupy the same cell(s).
        """
        if len(indices)==1: #100% of qubit is on one clashing spot.
            #This spot COULD be occupied.
            self.q.size+=1 #Only one bit needed, move happens or not.
            index = indices[0]
            # Collect the qubit of every existing move that may claim this
            # cell ("bus" of competing qubits).
            bus = []
            for existing_move in self.moves:
                if index in existing_move.indices:
                    if index==existing_move.indices[0]:
                        bus.append(existing_move.q1)
                    elif index==existing_move.indices[1]:
                        bus.append(existing_move.q2)
            #Now if any entry on the bus is true, our qubit is false.
            self.qc.x(self.q[self.q.size-1]) # make it 1
            self.qc.any_x(self.qc,*bus,self.q[self.q.size-1])
            #negate is any dependents are true.
            #So the new move can happen if none of the others happen.
            self.moves.append(Move(indices,player,self.q[self.q.size-1]))
            return 'ok'
        elif len(indices)==2:
            #Check first spot is not occupied, then second spot if first
            #is not occupied.
            self.q.size+=2 #Two bits needed (maybe) for each index.
            #This can be optimized, in effect only one qubit is needed,
            #and its result indicates the selected qubit.
            #However, then some control qubit is needed too.
            #Since there are moves that could possibly be erased completely!
            # One bus of competitors per target cell.
            bus0 = []
            bus1 = []
            for existing_move in self.moves:
                if indices[0] in existing_move.indices:
                    if indices[0]==existing_move.indices[0]:
                        bus0.append(existing_move.q1)
                    elif indices[0]==existing_move.indices[1]:
                        bus0.append(existing_move.q2)
                if indices[1] in existing_move.indices:
                    if indices[1]==existing_move.indices[0]:
                        bus1.append(existing_move.q1)
                    elif indices[1]==existing_move.indices[1]:
                        bus1.append(existing_move.q2)
            #Now if any entry on the bus is true, our first qubit is false.
            q1 = self.q[self.q.size-2] #a bit easier to look at (:
            q2 = self.q[self.q.size-1]
            if bus0:
                self.qc.x(q1)
                self.qc.cnx(self.qc,*bus0,q1)
            else: self.qc.h(q1)
            #And now the second qubit is 1 only if none of its competitors
            #are 1, and likewise if the previous qubit is zero.
            self.qc.x(q2)
            self.qc.bus_or(self.qc,q2,bus1,[q1])
            self.moves.append(Move(indices,player,q1,q2))
            return 'ok'
    def run(self):
        """Game loop: alternate players until quit ('q') or 'end', then
        optionally simulate and report the outcome."""
        self.running=True
        if self.print_info:
            # Intro / instructions, printed once at startup.
            print("Welcome to Quantum tic tac toe!")
            print("At each turn choose if to make one or two moves.")
            print("Playing one move at a time is a classic tic tac toe game.")
            print("At each turn the game state is printed.")
            print("This constitutes a 3x3 grid (standard game!).")
            print("You will see empty cells if no move was made on that part of the board.")
            print("Moves made by X are marked with Xi, 'i' some number.")
            print("e.g. X3 is the third move, played by X. When a move is made in a super position,")
            print("You will see its label, say X3, appear in several places.")
            print("This means your move is in a superposition of two classical moves!")
            print("A superposition of classical moves does not guarantee that a spot is occupied,")
            print("so other players can attempt to occupy it too.")
            print("Then the new move will be anti-correlated with the move already in that spot.")
            print("And so the game branches out into many simultaneous states.")
            print("The outcome is then computed by simulation...")
            print("so don't make too many quantum moves or it will take long to compute!")
            print("Enter 'q' at any time to quit")
            print("Enter 'end' to end the game, and compute the winner(s).")
            print("Good luck!")
        while self.running:
            # Player 0 (X) moves first each round.
            self.ask_player(0)
            self.ask_player(1)
        # game_full is set when the user enters 'end'.
        if self.game_full:
            self.compute_winner()
def ask_player(self,player):
"""Ask a player for move details"""
asking=False
if self.running:
asking = True
while asking:
if player==0:
player_name = 'X'
elif player==1:
player_name = 'O'
print("PLAYER "+player_name+" :")
cells = self.question('Play in 1 or 2 cells?')
if cells=='1':
x = int(self.question('x index:'))
y = int(self.question('y index:'))
status = self.add_move([[y,x]],player)
if status == 'ok':
asking = False
else: print(status)
elif cells=='2':
x1 = int(self.question('x1 index:'))
y1 = int(self.question('y1 index:'))
x2 = int(self.question('x2 index:'))
y2 = int(self.question('y2 index:'))
status = self.add_move([[y1,x1],[y2,x2]],player)
if status == 'ok':
asking = False
else: print(status)
if not self.running:
asking=False
def question(self,text):
"""ask user a question"""
if self.running:
answer = input(text)
if answer=='q':
self.running=False
return None
elif answer=='end':
self.game_full = True
self.running = False
else:
return answer
else: return None
    def compute_winner(self):
        """Find overall game winner, by finding winners of each outcome.

        Measures the whole quantum register on the qasm simulator (100
        shots), rebuilds the classical 3x3 board for every observed
        bitstring, and prints each board with its X/O line counts and the
        number of shots that produced it.
        """
        self.c.size = self.q.size #Make them the same
        self.qc.measure(self.q, self.c) #Measure
        backend = Aer.get_backend('qasm_simulator')
        job_sim = execute(self.qc, backend=backend, shots=100)
        sim_result = job_sim.result()
        print("simulation: ", sim_result)
        print(sim_result.get_counts(self.qc))
        self.counts = sim_result.get_counts(self.qc)
        for count in self.counts: #Takes key names
            c = list(count)[:-1] #splits key '1011' => ['1','0','1','1']
            c = c[::-1] #invert it so it goes 0 up...
            #Ignore the last bit since I dont know how to get rid of it
            #It is zero always.
            #The reason it is included is that I create a quantum register and
            #then start adding operations, quantum registers need at least one bit.
            counter = 0
            weight = self.counts[count]  # number of shots that produced this outcome
            empty = np.zeros((self.x,self.y),dtype=str)  # classical board to fill in
            for m in self.moves:
                if m.player == 0:
                    char = 'x'
                elif m.player==1:
                    char = 'o'
                result = []
                # Collect the measured bit(s) backing this move's qubit(s),
                # consuming bits from the string in register order.
                if m.q1:
                    result.append(c[counter])
                    counter+=1
                if m.q2:
                    result.append(c[counter])
                    counter+=1
                if len(result) == len(m.indices):
                    if result[0]=='1':
                        empty[m.indices[0][0],m.indices[0][1]] = char
                    if len(result)>1:
                        if result[1]=='1':
                            if result[0]=='1':
                                # Should be impossible: the circuit anti-correlates
                                # the two halves of a superposed move.
                                print('problem! a move appeard in two places.')
                                print(m)
                            empty[m.indices[1][0],m.indices[1][1]] = char
                elif not result: #Then it was a classical move
                    empty[m.indices[0][0],m.indices[0][1]] = char
            xscore,oscore=self.winners(empty)
            print('X wins: '+str(xscore))
            print('O wins: '+str(oscore))
            print('Shots: '+str(weight))
            print(empty)
def winners(self,empty):
"""Compute winners of a board"""
oscore = 0
xscore = 0
for x in range(self.x):
if empty[x,1]==empty[x,0] and empty[x,2]==empty[x,1]:
if empty[x,0]=='o':
oscore+=1
elif empty[x,0]=='x':
xscore +=1
for y in range(self.y):
if empty[1,y]==empty[0,y] and empty[2,y]==empty[0,y]:
if empty[0,y]=='o':
oscore+=1
elif empty[0,y]=='x':
xscore +=1
if empty[0,0]==empty[1,1] and empty[1,1]==empty[2,2]:
if empty[0,0]=='o':
oscore+=1
elif empty[0,0]=='x':
xscore += 1
if empty[2,0]==empty[1,1] and empty[1,1]==empty[0,2]:
if empty[2,0]=='o':
oscore+=1
elif empty[2,0]=='x':
xscore += 1
return [xscore,oscore]
    def _populate_board(self):
        """Automatically populate as below, for testing purposes.

        Plays a fixed script of superposed (two-cell) and classical
        (one-cell) moves for both players, alternating O (1) and X (0).
        """
        self.add_move([[2,2],[0,0]],1)
        self.add_move([[1,1],[1,2]],0)
        self.add_move([[1,2],[2,1]],1)
        self.add_move([[2,1]],0)
        self.add_move([[0,1]],1)
        self.add_move([[1,0]],0)
        self.add_move([[2,0]],1)
        self.add_move([[2,2]],0)
        self.add_move([[0,0]],1)
        self.add_move([[0,2]],0)
        self.add_move([[1,1]],1)
        self.add_move([[1,2]],0)
if __name__=="__main__":
    # Start an interactive 3x3 quantum tic-tac-toe session.
    B= Board(3,3)
    B.run()
    #B._populate_board()
    #a = B.compute_winner()
| {
"repo_name": "antoniomezzacapo/qiskit-tutorial",
"path": "community/games/game_engines/q_tic_tac_toe.py",
"copies": "1",
"size": "13845",
"license": "apache-2.0",
"hash": 3690901531888223000,
"line_mean": 40.8277945619,
"line_max": 101,
"alpha_frac": 0.5029252438,
"autogenerated": false,
"ratio": 3.86731843575419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9756393382529724,
"avg_score": 0.022770059404893156,
"num_lines": 331
} |
"""AQue is an asynchronous work queue, and a set of CLI and Python tools to use
and manage it.
See: `aque <command> --help` for more on individual commands.
"""
import argparse
import cProfile
import os
import pkg_resources
from aque.brokers import get_broker
from aque.queue import Queue
class AliasedSubParsersAction(argparse._SubParsersAction):
    """Subparser action that tolerates an ``aliases`` keyword.

    The alias names are accepted and discarded; registering them in the
    name->parser map is intentionally disabled.
    """

    def add_parser(self, name, **kwargs):
        alias_names = kwargs.pop('aliases', [])
        new_parser = super(AliasedSubParsersAction, self).add_parser(name, **kwargs)
        for _alias in alias_names:
            # Alias registration deliberately left disabled.
            pass
        return new_parser
def argument(*args, **kwargs):
    """Capture an ``add_argument`` spec as an (args, kwargs) pair."""
    return (args, kwargs)
def group(title, *args):
    """Pair an argument-group title with its argument specs."""
    return (title, args)
def command(*args, **kwargs):
    """Decorator tagging a function with its CLI parser configuration.

    The specs are stored on the function as ``__aque_command__`` and
    collected later through the ``aque_commands`` entry points.
    """
    spec = (args, kwargs)

    def _decorator(func):
        func.__aque_command__ = spec
        return func

    return _decorator
def main(argv=None):
    """Entry point for the ``aque`` CLI: build the parser from the
    ``aque_commands`` entry points, dispatch to the chosen subcommand,
    and close the broker on the way out.

    NOTE(review): this module targets Python 2 (``basestring`` below).
    """
    parser = argparse.ArgumentParser(
        prog='aque',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__,
    )
    parser.register('action', 'parsers', AliasedSubParsersAction)
    subparsers = parser.add_subparsers(metavar='COMMAND')
    parser.add_argument('--broker',
        dest='broker_url',
        default=os.environ.get('AQUE_BROKER'),
        help='URL of broker to use (default: $AQUE_BROKER)',
    )
    parser.add_argument('--profile')
    # Load every registered subcommand and sort by its display name.
    funcs = [ep.load() for ep in pkg_resources.iter_entry_points('aque_commands')]
    funcs.sort(key=lambda f: f.__aque_command__[1].get('name', f.__name__))
    for func in funcs:
        args, kwargs = func.__aque_command__
        name = kwargs.pop('name', func.__name__)
        kwargs.setdefault('aliases', [])
        kwargs.setdefault('formatter_class', argparse.RawDescriptionHelpFormatter)
        subparser = subparsers.add_parser(name, **kwargs)
        subparser.set_defaults(func=func)
        for arg_args, arg_kwargs in args:
            # A string first element marks a group() spec: arg_kwargs is
            # then the list of argument specs, not a kwargs dict (the
            # inner loop deliberately re-binds arg_args/arg_kwargs).
            if isinstance(arg_args, basestring):
                group = subparser.add_argument_group(arg_args)
                for arg_args, arg_kwargs in arg_kwargs:
                    group.add_argument(*arg_args, **arg_kwargs)
            else:
                subparser.add_argument(*arg_args, **arg_kwargs)
    args = parser.parse_args(argv)
    if not args.func:
        parser.print_usage()
        exit(1)
    args.broker = get_broker(args.broker_url)
    args.queue = Queue(args.broker)
    try:
        if args.profile:
            res = cProfile.runctx('args.func(args) or 0', globals(), locals(), args.profile)
        else:
            res = args.func(args) or 0
    finally:
        # Always release the broker, even if the command raised.
        args.broker.close()
    if __name__ == '__main__':
        exit(res)
    else:
        return res
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/main.py",
"copies": "1",
"size": "2756",
"license": "bsd-3-clause",
"hash": -6802286515577280000,
"line_mean": 27.1224489796,
"line_max": 92,
"alpha_frac": 0.6175616836,
"autogenerated": false,
"ratio": 3.7909215955983493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4908483279198349,
"avg_score": null,
"num_lines": null
} |
"""aque kill - Kill (or signal) tasks.
Marks a task's status as "killed" (preventing it from running), and sends
the given signal to any process that is currently running the task.
E.g.:
$ aque kill -sINT 1234
$ aque kill -s9 5678
"""
import os
import signal
import sys
from aque.commands.main import command, argument
from aque.queue import Queue
@command(
    argument('-s', '--signal', default='KILL', help='a signal accepted by kill(1) (default: 9 or KILL)'),
    argument('tid', nargs='+', type=int, help='the task(s) to kill/signal'),
    help='kill (or signal) tasks',
    description=__doc__,
)
def kill(args):
    """Mark the given tasks as killed and signal any process running them.

    Returns 1 when the --signal value is neither a known signal name nor
    an integer.
    """
    # From the worker's perspective, any signal means that the task is dead.
    args.broker.set_status_and_notify(args.tid, 'killed', None)

    # Resolve the signal: try "INT", then "SIGINT", then a plain number.
    try:
        sig = getattr(signal, args.signal, None)
        if sig is None:
            sig = getattr(signal, 'SIG' + args.signal, None)
        if sig is None:
            # BUG FIX: the parsed integer was previously discarded, so a
            # numeric --signal (e.g. -s9) sent None to the workers.
            sig = int(args.signal)
    except ValueError:
        sys.stderr.write('unknown signal %s\n' % args.signal)
        return 1

    # Actually send it across.
    args.broker.trigger(['signal_task.%d' % tid for tid in args.tid], args.tid, sig)
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/kill.py",
"copies": "1",
"size": "1186",
"license": "bsd-3-clause",
"hash": -3547986540953411600,
"line_mean": 25.9545454545,
"line_max": 105,
"alpha_frac": 0.6441821248,
"autogenerated": false,
"ratio": 3.488235294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9625950037797103,
"avg_score": 0.0012934762241088545,
"num_lines": 44
} |
"""aque output -- Fetch (or watch) stdout/stderr of a task.
Fetches output of a task that has already run, and optionally waits for all
output yet to be generated. Watching terminates when the tasks complete.
"""
import csv
import os
import sys
from Queue import Queue
from aque.commands.main import command, argument
@command(
    argument('-w', '--watch', action='store_true', help='watch for more output until the task(s) terminate'),
    argument('-f', '--format', default='{data}'),
    argument('tids', nargs='+', type=int, metavar='TID', help='ID(s) of the tasks to get output of'),
    help='fetch (or watch) stdout/stderr of a task',
    description=__doc__,
)
def output(args):
    """Replay the recorded stdout/stderr of the given tasks and, with
    --watch, stream new output until every task terminates.

    NOTE(review): Python 2 module (``itervalues``, capital-Q ``Queue``).
    """
    if args.watch:
        watching = set(args.tids)
        queue = Queue()
        # New output chunks are pushed onto the local queue by the broker.
        @args.broker.bind(['output_log.%d' % x for x in args.tids])
        def on_log(tid, fd, offset, data):
            queue.put((tid, fd, offset, data))
        # A terminal status enqueues an (tid, None, None, None) sentinel
        # so the watch loop stops waiting on that task.
        @args.broker.bind(['task_status.%s' % x for x in args.tids])
        def on_status(tids, status):
            if status in ('success', 'error', 'killed'):
                for tid in tids:
                    queue.put((tid, None, None, None))
        found = args.broker.fetch(args.tids)
        watching.intersection_update(found)
        # Tasks already past 'pending' will never produce more output.
        for task in found.itervalues():
            if task['status'] != 'pending':
                queue.put((task['id'], None, None, None))
        # NOTE(review): sentinel with tid=None is a no-op in the loop
        # below (KeyError is swallowed); presumably it guarantees the
        # first queue.get() returns — confirm intent.
        queue.put((None, None, None, None))
    # Replay everything recorded so far, remembering the highest offset
    # seen per (task, fd) so the watch loop can skip duplicates.
    max_offsets = dict((tid, {1: -1, 2: -1}) for tid in args.tids)
    for tid, ctime, fd, offset, data in args.broker.get_output(args.tids):
        stream = {1: sys.stdout, 2: sys.stderr}.get(fd)
        if stream:
            max_offsets[tid][fd] = max(max_offsets[tid][fd], offset)
            stream.write(args.format.format(**locals()))
            stream.flush()
    if args.watch:
        while watching:
            tid, fd, offset, data = queue.get()
            if fd is None:
                # Terminal-status sentinel: stop watching this task.
                try:
                    watching.remove(tid)
                except KeyError:
                    pass
                continue
            stream = {1: sys.stdout, 2: sys.stderr}.get(fd)
            if stream:
                # Skip chunks already printed during the replay phase.
                if offset <= max_offsets[tid][fd]:
                    continue
                else:
                    max_offsets[tid][fd] = offset
                stream.write(args.format.format(**locals()))
                stream.flush()
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/output.py",
"copies": "1",
"size": "2431",
"license": "bsd-3-clause",
"hash": -5411198487174532000,
"line_mean": 29.3875,
"line_max": 109,
"alpha_frac": 0.5491567256,
"autogenerated": false,
"ratio": 3.810344827586207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4859501553186207,
"avg_score": null,
"num_lines": null
} |
"""aque rm - Remove tasks from the queue.
Removes given tasks, or tasks matching given statuses. By default only removes
tasks submitted by the current user, but may operate on all tasks via `-x`.
"""
import os
import sys
from aque.commands.main import command, argument
from aque.queue import Queue
@command(
    argument('-e', '--error', action='store_true', help='remove all tasks which errored'),
    argument('-s', '--success', action='store_true', help='remove all tasks which succeeded'),
    argument('-k', '--killed', action='store_true', help='remove all tasks which were killed'),
    argument('-p', '--pending', action='store_true', help='remove all tasks which are pending'),
    argument('-c', '--complete', action='store_true', help='remove all tasks which completed (same as `-esk`)'),
    argument('-a', '--all', action='store_true', help='remove all tasks (same as `-eskp`)'),
    argument('-x', '--all-users', action='store_true', help='affect tasks of other users as well'),
    argument('-v', '--verbose', action='store_true'),
    argument('tids', nargs='*', metavar='TID', help='specific task(s) to remove'),
    help='remove tasks from the queue',
    description=__doc__,
)
def rm(args):
    """Delete the given task ids plus every task matching the selected
    statuses (current user only, unless -x).

    NOTE(review): Python 2 module (print statements below).
    """
    # Translate status flags into the set of statuses to search for.
    statuses = set()
    if args.pending:
        statuses.add('pending')
    if args.error:
        statuses.add('error')
    if args.success:
        statuses.add('success')
    if args.killed:
        statuses.add('killed')
    if args.complete:
        statuses.add('error')
        statuses.add('success')
        statuses.add('killed')
    if args.all:
        # A single None status means "no status filter at all".
        statuses = (None, )
    if not statuses and not args.tids:
        exit(1)
    base_filter = {}
    if not args.all_users:
        # NOTE(review): os.getlogin() fails without a controlling
        # terminal; getpass.getuser() would be more robust — confirm.
        base_filter['user'] = os.getlogin()
    to_delete = [int(x) for x in args.tids]
    for status in statuses:
        filter_ = base_filter.copy()
        if status:
            filter_['status'] = status
        for task in args.broker.search(filter_, ['id']):
            if args.verbose:
                print task['id']
            to_delete.append(task['id'])
    args.broker.delete(to_delete)
    if not args.verbose:
        print 'removed', len(to_delete), 'tasks'
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/rm.py",
"copies": "1",
"size": "2209",
"license": "bsd-3-clause",
"hash": 6656868315746332000,
"line_mean": 30.5571428571,
"line_max": 112,
"alpha_frac": 0.6106835672,
"autogenerated": false,
"ratio": 3.7890222984562607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9879483468065577,
"avg_score": 0.004044479518136635,
"num_lines": 70
} |
"""A Queryable interface into the Python AST.
Given an ast type and keyword arguments, the QueryConstructor attempts to
construct a query string which can be checked (read: eval'd (hopefully
safely?)) against a given AST.
## Examples
**Get function definitions from `ast` module**
>>> import ast
>>> import inspect
>>> source = ast.parse(inspect.getsource(ast))
>>> q = QueryConstructor(ast.FunctionDef)
>>> q.visit(source)
>>> len(q.results)
16
**Get function definition from `ast` module at line 32**
>>> q = QueryConstructor(ast.FunctionDef, lineno=32)
>>> q.visit(source)
>>> len(q.results)
1
**Look for imports, find the name of the first import**
>>> q = QueryConstructor(ast.Import)
>>> q.visit(source)
>>> q.results[0].names[0].name
'inspect'
**Get the value of the `__author__` variable of module**
>>> q = QueryConstructor(ast.Name, id='__author__')
>>> q.visit(ast.parse(inspect.getsource(inspect)))
>>> len(q.results)
1
>>> q.results[0].parent.value.s
'Ka-Ping Yee <ping@lfw.org>'
"""
import ast
from docs.visitors.query.query import QueryVisitor
class QueryConstructor(object):
    """Wraps QueryVisitor, building its query expression from an ast type
    plus equality constraints given as keyword arguments.

    NOTE(review): the constructed expression is ultimately eval'd by
    QueryVisitor, so keyword values flow into executable code.
    """

    def __init__(self, ast_type, *args, **kw):
        super(QueryConstructor, self).__init__()
        self._ast_type = ast_type
        # One equality clause per keyword; strings are quoted.
        clauses = []
        for attr, value in kw.iteritems():
            if isinstance(value, basestring):
                clauses.append('node.%s == "%s"' % (attr, value))
            else:
                clauses.append('node.%s == %s' % (attr, value))
        self._query = ' and '.join(clauses)
        type_check = 'isinstance(node, ast.%s)' % (self._ast_type.__name__, )
        if self._query:
            full_query = ' and '.join((type_check, self._query))
        else:
            full_query = type_check
        self._visitor = QueryVisitor(full_query)

    def visit(self, *args, **kw):
        """Visit an ast.AST instance and its children via the QueryVisitor."""
        return self._visitor.visit(*args, **kw)

    @property
    def results(self):
        """Nodes matched by the query."""
        return self._visitor.results
| {
"repo_name": "codebrowse/docs",
"path": "docs/visitors/query/query_constructor.py",
"copies": "1",
"size": "2170",
"license": "mit",
"hash": 1507506055136301000,
"line_mean": 22.8461538462,
"line_max": 76,
"alpha_frac": 0.6216589862,
"autogenerated": false,
"ratio": 3.580858085808581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47025170720085807,
"avg_score": null,
"num_lines": null
} |
"""A query to run against a DAO (abstracted from the persistent level)."""
from protorpc import messages
from sqlalchemy import func, not_
class Operator(messages.Enum):
    """Comparison operators a field filter can apply."""
    EQUALS = 0  # Case insensitive comparison for strings, exact comparison otherwise
    LESS_THAN = 1
    GREATER_THAN = 2
    LESS_THAN_OR_EQUALS = 3
    GREATER_THAN_OR_EQUALS = 4
    NOT_EQUALS = 5
# Note: we don't support contains or exact string comparison at this stage
class PropertyType(messages.Enum):
    """Logical type of a filterable/orderable property."""
    STRING = 0
    DATE = 1
    DATETIME = 2
    ENUM = 3
    INTEGER = 4
    CODE = 5
class FieldJsonContainsFilter(object):
    """Filter a JSON column using JSON_CONTAINS."""

    def __init__(self, field_name, operator, value):
        self.field_name = field_name
        self.operator = operator
        self.value = value

    def add_to_sqlalchemy_query(self, query, field):
        """Append this filter to *query*; a None value matches SQL NULL."""
        if self.value is None:
            return query.filter(field.is_(None))
        # JSON_CONTAINS yields 1 on containment; expect 0 for NOT_EQUALS.
        expected = 0 if self.operator == Operator.NOT_EQUALS else 1
        return query.filter(func.json_contains(field, self.value, "$") == expected)
class FieldLikeFilter(object):
    """Substring filter using SQL LIKE with the value wrapped in wildcards."""

    def __init__(self, field_name, operator, value):
        self.field_name = field_name
        self.operator = operator
        self.value = value

    def add_to_sqlalchemy_query(self, query, field):
        """Append a LIKE '%value%' (or NOT LIKE) clause; None matches NULL."""
        if self.value is None:
            return query.filter(field.is_(None))
        pattern = field.like("%{0}%".format(self.value))
        if self.operator == Operator.NOT_EQUALS:
            return query.filter(not_(pattern))
        return query.filter(pattern)
class FieldFilter(object):
    """Generic single-column comparison filter.

    Raises:
        ValueError: when the operator is not a supported comparison.
    """

    def __init__(self, field_name, operator, value):
        self.field_name = field_name
        self.operator = operator
        self.value = value

    def add_to_sqlalchemy_query(self, query, field):
        """Append ``field <op> value`` to *query*; None matches SQL NULL.

        Fix: the original built all six filtered queries eagerly inside a
        dict literal just to select one of them; the comparison is now
        constructed lazily for the requested operator only.
        """
        if self.value is None:
            return query.filter(field.is_(None))
        comparisons = {
            Operator.EQUALS: lambda f, v: f == v,
            Operator.LESS_THAN: lambda f, v: f < v,
            Operator.GREATER_THAN: lambda f, v: f > v,
            Operator.LESS_THAN_OR_EQUALS: lambda f, v: f <= v,
            Operator.GREATER_THAN_OR_EQUALS: lambda f, v: f >= v,
            Operator.NOT_EQUALS: lambda f, v: f != v,
        }
        compare = comparisons.get(self.operator)
        if not compare:
            raise ValueError("Invalid operator: %r." % self.operator)
        return query.filter(compare(field, self.value))
class OrderBy(object):
    """Sort directive: a field name plus a direction flag."""

    def __init__(self, field_name, ascending):
        self.field_name = field_name
        self.ascending = ascending  # True for ascending order
class Query(object):
    """Bundle of filter, ordering and paging options to run against a DAO."""

    def __init__(
        self,
        field_filters,
        order_by,
        max_results,
        pagination_token,
        a_id=None,
        always_return_token=False,
        include_total=False,
        offset=False,
        options=None,
        invalid_filters=None
    ):
        # Filtering.
        self.field_filters = field_filters
        self.invalid_filters = invalid_filters
        # Ordering and paging.
        self.order_by = order_by
        self.offset = offset
        self.max_results = max_results
        self.pagination_token = pagination_token
        self.always_return_token = always_return_token
        self.include_total = include_total
        # Miscellaneous.
        self.ancestor_id = a_id
        self.options = options
class Results(object):
    """One page of query results plus pagination metadata."""

    def __init__(self, items, pagination_token=None, more_available=False, total=None):
        self.items = items
        self.pagination_token = pagination_token  # opaque cursor for the next page
        self.more_available = more_available
        self.total = total  # overall count, when requested
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/query.py",
"copies": "1",
"size": "3763",
"license": "bsd-3-clause",
"hash": 508207372228244740,
"line_mean": 29.8442622951,
"line_max": 87,
"alpha_frac": 0.6157321286,
"autogenerated": false,
"ratio": 3.907580477673936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023312606273935,
"avg_score": null,
"num_lines": null
} |
"""aque status - List tasks in the queue and their status.
Lists all tasks in the queue (limited to the current user by default) and
their status, arguments, or any other fields.
All of the fields of the standard task prototype are availible to `--filter`,
`--csv`, and `--pattern`, in addition to the following computed values:
args_string: arguments and kwargs as they would be passed to a function
func_name: an entrypoints-style name of the function
func_signature: a representation of the called function and arguments
name_or_func: the task's name, if set, or the the name of the function
num_dependencies: the number of dependencies we have resolved
running_time: a `datetime.timedelta` or None of the running time
"""
import csv
import os
import sys
from aque.commands.main import command, argument
def walk_tasks(tasks, max_depth=0, depth_first=False, _depth=1):
    """Yield (depth, task, dependencies) over a task tree.

    Dependencies that are not dicts (unresolved ids) are filtered out.
    max_depth of 0 means unlimited; depth_first selects post-order
    instead of pre-order.
    """
    for task in tasks:
        dependencies = filter(lambda t: isinstance(t, dict), task.get('dependencies', ()))
        entry = (_depth, task, dependencies)
        if not depth_first:
            yield entry
        if not max_depth or _depth < max_depth:
            for descendant in walk_tasks(dependencies, max_depth, depth_first, _depth + 1):
                yield descendant
        if depth_first:
            yield entry
@command(
    argument('-d', '--depth', type=int, default=1, help='maximum depth of dependencies to; 0 for unlimited'),
    argument('--flat', action='store_true', help='don\'t group tasks by their dependencies'),
    argument('-x', '--all-users', action='store_true', help='display tasks of all users'),
    argument('-f', '--filter', help='''Python expression determining if a given task should be
        displayed, e.g. `status in ('success', 'error')`'''),
    argument('-c', '--csv', help='comma-separated list of fields to output as a CSV'),
    argument('-p', '--pattern',
        default='{id:6d} {user:s} {status:7s} {pattern:7s} {name_or_func}',
        help='`str.format()` pattern for formatting each task'),
    argument('tids', nargs='*', type=int, metavar='TID', help='specific tasks to display'),
    help='list tasks in the queue and their status',
    description=__doc__,
)
def status(args):
    """List tasks (grouped under their dependents unless --flat) and print
    each one through --pattern, or emit the --csv fields.

    NOTE(review): Python 2 module (``itervalues``, print statements).
    """
    # Collect the tasks to show: explicit ids, or a broker search
    # restricted to the current user unless -x was given.
    if args.tids:
        tasks_by_id = args.broker.fetch(args.tids)
    else:
        filter_ = {}
        if not args.all_users:
            filter_['user'] = os.getlogin()
        tasks_by_id = dict((t['id'], t) for t in args.broker.search(filter_))
    if args.flat:
        tasks = sorted(tasks_by_id.itervalues(), key=lambda t: t['id'])
    else:
        # Resolve all dependencies, and figure out what remains on the top level.
        top_level_by_id = tasks_by_id.copy()
        for task in tasks_by_id.itervalues():
            dep_ids = task.get('dependencies', [])
            for tid in dep_ids:
                top_level_by_id.pop(tid, None)
            task['dependencies'] = [tasks_by_id.get(tid, tid) for tid in sorted(dep_ids)]
        tasks = sorted(top_level_by_id.itervalues(), key=lambda t: t['id'])
    if args.csv:
        fields = [f.strip() for f in args.csv.split(',')]
        writer = csv.writer(sys.stdout)
        writer.writerow(fields)
    if args.filter:
        # The filter expression is compiled once and eval'd per task below.
        filter_ = compile(args.filter, '<--filter>', 'eval')
    else:
        filter_ = None
    for depth, task, deps in walk_tasks(tasks, max_depth=args.depth):
        # Build the computed display fields documented in the module docstring.
        arg_specs = []
        for arg in (task.get('args') or ()):
            arg_specs.append(repr(arg))
        for k, v in sorted((task.get('kwargs') or {}).iteritems()):
            arg_specs.append("%s=%r" % (k, v))
        func = task.get('func')
        try:
            func_name = '%s:%s' % (func.__module__, func.__name__)
        except AttributeError:
            func_name = str(func or '')
        func_spec = '%s(%s)' % (func_name, ', '.join(arg_specs))
        task['func_name'] = func_name
        task['func_signature'] = func_spec
        task['args_string'] = ', '.join(arg_specs)
        task['pattern'] = task['pattern'] or '-'
        task['name'] = task['name'] or ''
        task['name_or_func'] = '"%s"' % task['name'] if task['name'] else func_name
        task['num_dependencies'] = len(deps)
        task['depth'] = depth
        if task.get('first_active') is not None and task.get('last_active') is not None:
            task['running_time'] = task['last_active'] - task['first_active']
        else:
            task['running_time'] = None
        # This isn't normally good form, but since the implementation of this
        # thing allows you to do bad stuff, this isn't an added security risk.
        if filter_ and not eval(filter_, {}, task):
            continue
        if args.csv:
            data = [str(task.get(f)) for f in fields]
            writer.writerow(data)
        else:
            print '\t' * (depth-1) + args.pattern.format(**task)
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/status.py",
"copies": "1",
"size": "4957",
"license": "bsd-3-clause",
"hash": 4387136556685551600,
"line_mean": 38.031496063,
"line_max": 109,
"alpha_frac": 0.5977405689,
"autogenerated": false,
"ratio": 3.7439577039274923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48416982728274927,
"avg_score": null,
"num_lines": null
} |
''' Aquest modul inclou les funcions d'exportacio implementades'''
from json2html import *
import webbrowser
import tempfile
import sys
import ldap3.core.exceptions
import re
from taulaClass import Taula
from time_functions import convertir_temps
def print_results(llista, json):
    """Print the result of an ldap query as a fixed-width table.

    amplada_col sets the column width; with too many returned attributes
    the columns overlap and the output becomes hard to read.
    """
    amplada_col = 20
    taula = Taula(results_2(llista), amplada_col, True, "|", "-")
    print(taula)
print(taula)
def export_json(adObj):
    """Return the last ldap response as JSON, converting 18-digit AD
    timestamps to readable dates."""
    raw = adObj.c.response_to_json()
    return re.sub(r'\d{18}', convertir_temps, raw, count=0, flags=0)
def export_csv(adObj):
    """Return the last ldap response as a tab-separated CSV string,
    converting 18-digit AD timestamps to readable dates."""
    raw = results(adObj.c.response)
    return re.sub(r'\d{18}', convertir_temps, raw, count=0, flags=0)
def export_pdf(adObj):
    """PDF export placeholder — not implemented yet."""
    return "En proces de desenvolupament"
def export_html(adObj):
    """Return the last ldap response rendered as a minimal HTML report,
    converting 18-digit AD timestamps to readable dates."""
    table = json2html.convert(json = adObj.c.response_to_json()).replace('<th>entries</th>','')
    html = '<html><head></head><h1>Titol de l informe</h1><body>' + table + '</body><html>'
    return re.sub(r'\d{18}', convertir_temps, html, count=0, flags=0)
def result_open_html(adObj):
    """Open the results of the last query in the default web browser.

    Writes the HTML export to a temporary file and hands it to the OS
    browser.  Errors are reported on stdout but not raised.

    Fixes: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt, and the temp file handle was never closed.
    """
    try:
        fp = tempfile.NamedTemporaryFile(suffix='.html', delete=False)
        try:
            fp.write(str.encode(export_html(adObj)))
        finally:
            # Close before launching the browser so the file is flushed
            # (and openable on platforms that lock open files).
            fp.close()
        webbrowser.open(fp.name)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        return
def results(llista):
    """Build a tab-separated (CSV-like) string from an ldap response list.

    The header row is 'dn' plus the sorted union of all attribute names
    seen across the entries; each entry row holds the dn followed by one
    column per attribute (multi-valued attributes joined with '; ').
    """
    # Collect the union of attribute names over all entries.
    atributs = []
    for elem in llista:
        if elem.get('attributes'):
            atributs = atributs + list(elem.get('attributes').keys())
    atributs = sorted(set(atributs))
    # BUG FIX: removed a leftover debug print(atributs) that polluted stdout.
    contingut = "dn\t" + str(atributs)[2:-2].replace("', '", "\t") + "\n"
    for elem in llista:
        if elem.get('dn'):
            contingut = contingut + str(elem.get('dn')) + "\t"
            for atribut in atributs:
                if elem.get('attributes'):
                    if elem.get('attributes').get(atribut):
                        valor = str(elem.get('attributes').get(atribut)).replace("\', \'", "; ")
                        contingut = contingut + valor + "\t"
                    else:
                        contingut = contingut + "\t"
            # Replace the trailing tab with a newline.
            contingut = contingut[:-1] + "\n"
    return contingut
def results_2(llista):
    """Build the ldap query result as a list of rows (list of lists).

    Row 0 is the header: 'dn' plus the sorted union of attribute names.
    Each following row holds one entry's dn and its attribute values,
    with 18-digit AD timestamps converted via convertir_temps and list
    values joined with '; '.
    """
    atributs = []
    for elem in llista:
        if elem.get('attributes'): atributs = atributs + list(elem.get('attributes').keys())
    atributs = sorted(set(atributs))
    atributs.insert(0,'dn')
    contingut = []
    contingut.append(atributs)
    for elem in llista:
        linia = []
        if elem.get('dn'):
            linia.append(str(elem.get('dn')))
        for atribut in atributs:
            if elem.get('attributes') and atribut != 'dn':
                if elem.get('attributes').get(atribut):
                    if isinstance(elem.get('attributes').get(atribut),list):
                        # List values: join with '; ' and strip the [' '] wrapper.
                        linia.append(re.sub(r'\d{18}', convertir_temps,str(elem.get('attributes').get(
                            atribut)).replace("\', \'", "; ")[2:-2], count=0, flags=0))
                    else: linia.append(re.sub(r'\d{18}', convertir_temps,str(elem.get('attributes').get(
                        atribut)), count=0, flags=0))
                else : linia.append('')
        if linia: contingut.append(linia)
    return contingut
| {
"repo_name": "rpiza/adaudita",
"path": "python/exports.py",
"copies": "1",
"size": "3716",
"license": "mit",
"hash": -5564271301371619000,
"line_mean": 34.3904761905,
"line_max": 115,
"alpha_frac": 0.5971474704,
"autogenerated": false,
"ratio": 3.154499151103565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42516466215035653,
"avg_score": null,
"num_lines": null
} |
# A FIFO queue based on two stacks.  One stack (first) receives pushes.
# The other (second) serves pops: whenever it is empty and a pop/peek is
# requested, every element is moved over from the first stack, reversing
# the order, so the oldest element ends up on top of the second stack.
class Queue2Stacks(object):
    """FIFO queue with amortized O(1) operations built from two stacks.

    Fixes over the previous version: the refill now actually reverses the
    elements (as the design describes) and pop takes from the end of the
    list, replacing the O(n) ``self.second[1:]`` slice done on every pop.
    External behavior is unchanged: FIFO order, and peek/pop return None
    on an empty queue.
    """

    def __init__(self):
        self.first = []   # push side
        self.second = []  # pop side (oldest element at the top/end)

    def _refill(self):
        # Move everything from the push side, reversing the order.
        while self.first:
            self.second.append(self.first.pop())

    def peek(self):
        """Return the oldest element without removing it, or None if empty."""
        if not self.second:
            self._refill()
        return self.second[-1] if self.second else None

    def pop(self):
        """Remove and return the oldest element, or None if empty."""
        if not self.second:
            self._refill()
        return self.second.pop() if self.second else None

    def put(self, value):
        """Append *value* to the end of the queue."""
        self.first.append(value)
| {
"repo_name": "LupoDiRoma/miscellanea",
"path": "python/queue2stacks.py",
"copies": "1",
"size": "1351",
"license": "apache-2.0",
"hash": -6113826231373486000,
"line_mean": 31.1666666667,
"line_max": 79,
"alpha_frac": 0.5817912657,
"autogenerated": false,
"ratio": 4.081570996978852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003774076835035089,
"num_lines": 42
} |
""" A Queue using a linked list like structure """
from typing import Any
class Node:
    """Singly linked node holding one payload value."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # following node, or None at the tail

    def __str__(self) -> str:
        return str(self.data)
class LinkedQueue:
    """
    >>> queue = LinkedQueue()
    >>> queue.is_empty()
    True
    >>> queue.put(5)
    >>> queue.put(9)
    >>> queue.put('python')
    >>> queue.is_empty();
    False
    >>> queue.get()
    5
    >>> queue.put('algorithms')
    >>> queue.get()
    9
    >>> queue.get()
    'python'
    >>> queue.get()
    'algorithms'
    >>> queue.is_empty()
    True
    >>> queue.get()
    Traceback (most recent call last):
    ...
    IndexError: dequeue from empty queue
    """

    def __init__(self) -> None:
        # Head and tail of the underlying singly linked list.
        self.front = self.rear = None

    def __iter__(self):
        cursor = self.front
        while cursor is not None:
            yield cursor.data
            cursor = cursor.next

    def __len__(self) -> int:
        """
        >>> queue = LinkedQueue()
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> len(queue)
        5
        >>> for i in range(1, 6):
        ...     assert len(queue) == 6 - i
        ...     _ = queue.get()
        >>> len(queue)
        0
        """
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """
        >>> queue = LinkedQueue()
        >>> for i in range(1, 4):
        ...     queue.put(i)
        >>> queue.put("Python")
        >>> queue.put(3.14)
        >>> queue.put(True)
        >>> str(queue)
        '1 <- 2 <- 3 <- Python <- 3.14 <- True'
        """
        return " <- ".join(map(str, self))

    def is_empty(self) -> bool:
        """
        >>> queue = LinkedQueue()
        >>> queue.is_empty()
        True
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> queue.is_empty()
        False
        """
        return self.front is None

    def put(self, item) -> None:
        """
        >>> queue = LinkedQueue()
        >>> queue.get()
        Traceback (most recent call last):
        ...
        IndexError: dequeue from empty queue
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> str(queue)
        '1 <- 2 <- 3 <- 4 <- 5'
        """
        tail = Node(item)
        if self.front is None:
            self.front = self.rear = tail
        else:
            self.rear.next = tail
            self.rear = tail

    def get(self) -> Any:
        """
        >>> queue = LinkedQueue()
        >>> queue.get()
        Traceback (most recent call last):
        ...
        IndexError: dequeue from empty queue
        >>> queue = LinkedQueue()
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> for i in range(1, 6):
        ...     assert queue.get() == i
        >>> len(queue)
        0
        """
        if self.front is None:
            raise IndexError("dequeue from empty queue")
        head = self.front
        self.front = head.next
        if self.front is None:
            self.rear = None
        return head.data

    def clear(self) -> None:
        """
        >>> queue = LinkedQueue()
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> queue.clear()
        >>> len(queue)
        0
        >>> str(queue)
        ''
        """
        self.front = self.rear = None
if __name__ == "__main__":
    from doctest import testmod

    # Run the embedded doctests when executed directly.
    testmod()
| {
"repo_name": "TheAlgorithms/Python",
"path": "data_structures/queue/linked_queue.py",
"copies": "1",
"size": "3550",
"license": "mit",
"hash": 2918122864144365000,
"line_mean": 22.5099337748,
"line_max": 56,
"alpha_frac": 0.4397183099,
"autogenerated": false,
"ratio": 3.821313240043057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4761031549943057,
"avg_score": null,
"num_lines": null
} |
"""aque worker - Run a worker which processes the queue.
A worker is responsible for actually running the tasks on the queue, and for
mananging the CPU, memory, and other resources availible on the machine.
"""
import logging
import signal
import sys
from aque.commands.main import command, argument
from aque.worker import Worker
@command(
    argument('-1', '--one', action='store_true', help='run only a single task'),
    argument('-2', '--to-end', action='store_true', help='run only until there is nothing pending on the queue'),
    argument('-c', '--cpus', type=int, metavar='CPU_COUNT', help='how many CPUs to use'),
    help='run a worker',
    description=__doc__,
)
def worker(args):
    # Entry point for `aque worker`: build a Worker bound to the configured
    # broker and drive it in one of three run modes.

    def on_hup(signum, frame):
        # SIGHUP asks the worker to stop picking up new work (graceful drain).
        # `worker` resolves via closure to the Worker instance assigned below;
        # the handler can only fire after signal delivery, i.e. post-assignment
        # in practice for this short setup window.
        logging.getLogger(__name__).info('HUP! Stopping worker from taking more work.')
        worker.stop()
    signal.signal(signal.SIGHUP, on_hup)

    worker = Worker(args.broker, max_cpus=args.cpus)
    try:
        if args.one:
            worker.run_one()
        elif args.to_end:
            worker.run_to_end()
        else:
            worker.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop a foreground worker; exit quietly.
        pass
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/worker.py",
"copies": "1",
"size": "1146",
"license": "bsd-3-clause",
"hash": 3109273827608129000,
"line_mean": 28.3846153846,
"line_max": 113,
"alpha_frac": 0.6492146597,
"autogenerated": false,
"ratio": 3.7573770491803278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9901002945250629,
"avg_score": 0.0011177527259398606,
"num_lines": 39
} |
"""aque xargs - xargs-like submitter of multiple tasks.
Submits multiple tasks with the same base command, taking the rest of the
arguments from stdin.
"""
from __future__ import division
import argparse
import itertools
import os
import sys
import shlex
import psutil
from aque.commands.main import main, command, argument, group
from aque import utils
def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # One shared iterator repeated n times: izip_longest pulls from it in
    # round-robin order, which yields consecutive n-sized chunks, padding
    # the final chunk with `fillvalue`.  (Python 2: itertools.izip_longest.)
    shared = iter(iterable)
    return itertools.izip_longest(*([shared] * n), fillvalue=fillvalue)
def tokenize_lines(count):
    # Yield one task's worth of shlex tokens per `count` lines of stdin
    # (mirrors `xargs -L`).  The final chunk may contain grouper's None
    # fill values; the caller filters those out.
    for line_batch in grouper(sys.stdin, count):
        collected = []
        for raw_line in line_batch:
            collected.extend(shlex.split(raw_line))
        yield collected
def tokenize_all():
    # One flat token stream covering all of stdin, wrapped in a one-element
    # list so callers can treat it like the chunked tokenizers.
    per_line = (shlex.split(line) for line in sys.stdin)
    return [itertools.chain.from_iterable(per_line)]
def tokenize_words(count):
    # Groups of `count` tokens drawn from the full stdin token stream
    # (mirrors `xargs -n`).
    token_stream = tokenize_all()[0]
    return grouper(token_stream, count)
@command(
group('xargs compatibility',
argument('-L', '--lines', type=int, metavar='N', help='''how many lines of input to use
for arguments of a single task (for compatibility with `xargs -L`)'''),
argument('-n', '--words', type=int, metavar='N', help='''how many works of input to use
for arguments of a single task (for compatibility with `xargs -n`)'''),
argument('-P', '--maxprocs', type=int, metavar='N', help='''how many tasks to run at once
(for compatibility with `xargs -P` and exclusive with --cpus)'''),
),
argument('-s', '--shell', action='store_true', help='''the first argument is
executed as a shell script, with the rest provided to it as arguments'''),
argument('-w', '--watch', action='store_true', help='watch the stdout/stderr of the task as it executes'),
argument('--name', help='the task\'s name (for `aque status`)'),
argument('-p', '--priority', type=int, help='higher ones go first'),
argument('-c', '--cpus', type=int, help='how many CPUs to use per task'),
argument('--host', help='the host(s) to run on'),
argument('--platform', help='the platform to run on'),
argument('-v', '--verbose', action='store_true', help='print IDs of all tasks'),
argument('command', nargs=argparse.REMAINDER),
help='xargs-like submitter of multiple tasks',
description=__doc__,
)
def xargs(args):
ids = []
if args.lines:
token_iter = tokenize_lines(args.lines)
elif args.words:
token_iter = tokenize_words(args.words)
else:
token_iter = tokenize_all()
if args.cpus:
cpus = args.cpus
elif args.maxprocs:
cpus = psutil.cpu_count() / args.maxprocs
else:
cpus = None
options = {'environ': os.environ}
for k in ('cwd', 'host', 'platform', 'priority'):
v = getattr(args, k, None)
if v is not None:
options[k] = getattr(args, k)
prototypes = []
for tokens in token_iter:
cmd = list(args.command)
if args.shell:
cmd.insert(0, os.environ.get('SHELL', '/bin/bash'))
cmd.insert(1, '-c')
cmd.insert(3, 'aque-submit')
cmd.extend(t for t in tokens if t is not None)
prototype = options.copy()
prototype.update(
pattern='shell',
args=cmd,
cpus=cpus,
name=' '.join(cmd),
# Magic prioritization!
io_paths=utils.paths_from_args(cmd),
)
prototypes.append(prototype)
future_map = args.queue.submit_many(prototypes)
if args.verbose:
print '\n'.join(str(tid) for tid in sorted(f.id for f in future_map.itervalues()))
future = args.queue.submit_ex(
pattern=None,
name=args.name or 'xargs ' + ' '.join(args.command),
dependencies=future_map.values(),
)
if args.watch:
args = ['output', '--watch']
args.extend(str(f.id) for f in future_map.itervalues())
args.append(str(future.id))
return main(args)
print future.id
| {
"repo_name": "mikeboers/aque",
"path": "aque/commands/xargs.py",
"copies": "1",
"size": "4030",
"license": "bsd-3-clause",
"hash": -8037556018976320000,
"line_mean": 29.0746268657,
"line_max": 110,
"alpha_frac": 0.6099255583,
"autogenerated": false,
"ratio": 3.7628384687208216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48727640270208217,
"avg_score": null,
"num_lines": null
} |
"""A quick and dirty example of using Mayavi to overlay anatomy and activation.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
from enthought.mayavi import mlab
from fiac_util import load_image_fiac
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASK = load_image_fiac('group', 'mask.nii')
AVGANAT = load_image_fiac('group', 'avganat.nii')
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def view_thresholdedT(design, contrast, threshold, inequality=np.greater):
    """
    A mayavi isosurface view of thresholded t-statistics

    Parameters
    ----------
    design: one of ['block', 'event']
    contrast: str
    threshold: float
    inequality: one of [np.greater, np.less]
    """
    maska = np.asarray(MASK)
    tmap = np.array(load_image_fiac('group', design, contrast, 't.nii'))
    test = inequality(tmap, threshold)
    # Keep only voxels passing the threshold test; everything else is zero.
    tval = np.zeros(tmap.shape)
    tval[test] = tmap[test]

    # XXX make the array axes agree with mayavi2
    avganata = np.array(AVGANAT)
    avganat_iso = mlab.contour3d(avganata * maska, opacity=0.3, contours=[3600],
                                 color=(0.8,0.8,0.8))
    avganat_iso.actor.property.backface_culling = True
    avganat_iso.actor.property.ambient = 0.3

    # FIX: multiply by the numpy mask array (maska), not the raw MASK image
    # object, matching the conversion used for the anatomy overlay above.
    tval_iso = mlab.contour3d(tval * maska, color=(0.8,0.3,0.3),
                              contours=[threshold])
    return avganat_iso, tval_iso
#-----------------------------------------------------------------------------
# Script entry point
#-----------------------------------------------------------------------------
if __name__ == '__main__':
    # A simple example use case: render one contrast of the block design
    # thresholded at t > 0.3 over the group-average anatomy.
    design = 'block'
    contrast = 'sentence_0'
    threshold = 0.3
    print 'Starting thresholded view with:'
    print 'Design=',design,'contrast=',contrast,'threshold=',threshold
    view_thresholdedT(design, contrast, threshold)
| {
"repo_name": "yarikoptic/NiPy-OLD",
"path": "examples/fiac/view_contrasts_3d.py",
"copies": "1",
"size": "2234",
"license": "bsd-3-clause",
"hash": -5860623298086610000,
"line_mean": 30.9142857143,
"line_max": 80,
"alpha_frac": 0.4615040286,
"autogenerated": false,
"ratio": 4.296153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007232995400486635,
"num_lines": 70
} |
## A quick and dirty linked list implementation
from typing import Any, List, Optional
from collections.abc import Iterable
from .exceptions import *
class Node:
    """ A node in our linked list """

    def __init__(self, value: Any, next: Any =None, previous: Any =None) -> None:
        # The payload must be a real value; None is reserved for "no link".
        if value is None:
            raise ValueError(
                f'{self.__class__.__name__} expects a value that is not {type(None)}'
            )
        self.value = value
        # Wrap raw values in Node so that both links are always Node-or-falsy.
        if next:
            self.next = Node(next) if type(next) is not Node else next
        else:
            self.next = next
        if previous:
            self.previous = Node(previous) if type(previous) is not Node \
                else previous
        else:
            self.previous = previous

    def nxt(self) -> 'Node':
        # Follow the forward link; raise instead of returning None.
        if self.next:
            return self.next
        else:
            raise NoNextNodeError('Not linked to another node')

    def prv(self) -> 'Node':
        # Follow the backward link; raise instead of returning None.
        if self.previous:
            return self.previous
        else:
            raise NoPreviousNodeError('Not linked to previous node')

    def linkn(self, other: 'Node') -> None:
        # Set (overwrite) the forward link.
        self.next = other

    def linkp(self, other: 'Node') -> None:
        # Set (overwrite) the backward link.
        self.previous = other

    def __rshift__(self, other: 'Node') -> 'Node':
        """ Syntactic sugar for calling self.linkn, but chaining links too; allows
        the syntax N1 >> N2 >> N3 >> N4 ... for constructing linked lists
        """
        # NOTE(review): this returns `self`, so `N1 >> N2 >> N3` evaluates as
        # `(N1 >> N2) >> N3` and re-links N1 (N1.next ends up N3) rather than
        # building N1 -> N2 -> N3 as the docstring implies.  insertBefore /
        # insertAfter rely on this exact behaviour — confirm before changing.
        self.linkn(other)
        return self

    def __lshift__(self, other: 'Node') -> 'Node':
        """ Syntactic sugar for calling self.linkp, and allows the converse,
        N1 << N2 << N3 << N4 ..., for linking the opposite direction
        """
        # NOTE(review): same caveat as __rshift__ — returns `self`, so each
        # chained `<<` re-targets the first node's `previous` link.
        self.linkp(other)
        return self

    def remove_prev_link(self) -> None:
        # Drop the backward link if present (no-op otherwise).
        if self.previous:
            self.previous = None

    def remove_post_link(self) -> None:
        # Drop the forward link if present (no-op otherwise).
        if self.next:
            self.next = None

    def has_prev(self) -> bool:
        return self.previous is not None

    def has_next(self) -> bool:
        return self.next is not None
class DoublyLinkedList(Iterable):
    """ Doubly linked list implementation """

    # Class-level default; `self._size += 1` rebinds it as an instance
    # attribute on first mutation, so instances do not share a counter.
    _size = 0

    def __init__(self, head_val: Any =None) -> None:
        if head_val is not None:
            self.head = Node(head_val) if type(head_val) is not Node else head_val
            self._size += 1
        else:
            self.head = None
        self.last = self.head

    def __iter__(self) -> 'DoublyLinkedList':
        # The list acts as its own (single, non-reentrant) iterator:
        # reset the cursor and hand ourselves back.
        self._start = None
        return self

    def __next__(self) -> Node:
        try:
            if self._start is None:
                self._start = self.head
            else:
                self._start = self._start.nxt()
        except (NoNextNodeError, AttributeError):
            # Either we ran off the end (NoNextNodeError) or the list is
            # empty (self.head is None -> AttributeError on .nxt()).
            # self._start is _still_ None
            raise StopIteration
        else:
            return self._start

    @classmethod
    def fromlist(cls, some_list: List[Any]) -> 'DoublyLinkedList':
        """ Convert a list to a doubly linked list object """
        dll = cls()
        for item in some_list:
            dll += Node(item)
        return dll

    def to_list(self) -> List[Any]:
        # Collect payloads front-to-back; empty list when size is 0.
        return [item.value for item in self] if self.size() else []

    def addLast(self, other: Any) -> None:
        # Append at the tail.
        # NOTE(review): only the forward link is set here (linkn), so nodes
        # appended this way carry no `previous` pointer — the "doubly" part
        # is not maintained by this path; confirm whether that is intended.
        if self.head is None:
            # There's nothing at the start of the linked list
            self.head = Node(other) if type(other) is not Node else other
            self.last = self.head
        else:
            # Link the last element to `other` to make `other` the last element
            self.last.linkn(other)
            self.last = self.last.nxt()
            # Now it looks like this:
            # N1 -> N2 -> ... -> Nn -> other
        self._size += 1

    def __iadd__(self, other: Node) -> 'DoublyLinkedList':
        """ Syntactic sugar for adding a node to the linked list; must be of Node
        type, however---similar to lists and __iadd__
        """
        if type(other) is not Node:
            raise TypeError(
                f'{self.__class__.__name__} expected type Node, received {type(other)}'
            )
        self.addLast(other)
        return self

    def size(self) -> int:
        return self._size

    def insertBefore(self, other: Any, node: Node) -> None:
        """ Insert a value or node before another node---does not guarantee a doubly
        linked list.
        """
        if node.has_prev():
            # Relies on >> and << returning their left operand; see Node.
            node.previous >> (other if type(other) is Node else Node(other)) << node
        else:
            # `node` has no prior (it must be the head of the ll)
            node << (Node(other) if type(other) is not Node else other)
            node.prv() >> node
            self.head = node.prv()
        self._size += 1

    def insertAfter(self, other: Any, node: Node) -> None:
        """ Insert a value or node after another node """
        if node.has_next():
            node >> (other if type(other) is Node else Node(other)) << node.next
        else:
            # `node` has no next (it must be the end of the ll)
            node >> (other if type(other) is Node else Node(other))
            node.nxt() << node
        self._size += 1

    def clear(self) -> None:
        """ Clear entire linked list """
        # `del node` only unbinds the loop variable; the nodes actually become
        # collectable once head/last are dropped below.
        for node in self:
            del node
        self.head = None
        self.last = self.head
        self._size = 0
def search(self, value: Any) -> Optional[Node]:
""" Search for a node containing some value """
for node in self:
if node.value == value:
return node | {
"repo_name": "bjd2385/doublylinkedlist",
"path": "src/linked.py",
"copies": "1",
"size": "5659",
"license": "mit",
"hash": 6128785234618469000,
"line_mean": 29.7608695652,
"line_max": 85,
"alpha_frac": 0.5389644814,
"autogenerated": false,
"ratio": 4.071223021582734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011344895270448854,
"num_lines": 184
} |
# a quick and dirty xml module for parsing and generating xml/html
#
# this is a very poor man's xml parser
# it uses the python syntax parser for parsing xml code
# and defines a tag class called T. xml code is first translated to
# valid python code and then evaluated. works also under jython.
#
# (c) f.jamitzky 2006
class T:
    """A minimal XML tag: a name, attributes (_kw), and children (_args)."""

    def __init__(self,name,args=[]):
        # `name` may carry inline attributes: "tag key1=val1 key2=val2".
        # NOTE(review): mutable default `args=[]` is shared between every
        # instance constructed without an explicit args list — classic
        # Python pitfall; confirm no caller relies on it.
        arglist=name.split(" ")
        self._name=arglist[0]
        self._kw={}
        self._args=args
        if len(arglist)>1:
            kw={}
            for i in arglist[1:]:
                key, val= i.split("=")
                kw[key]=val
            self._kw=kw

    def __len__(self):
        # Number of children.
        return len(self._args)

    def __str__(self):
        # Serialize back to XML text; self-closing form when childless.
        if self._args==[]:
            if self._kw=={}:
                txt="<"+self._name+"/>"
            else:
                txt="<"+self._name
                for i in self._kw.keys():
                    txt+=" "+str(i)+"="+str(self._kw[i])+" "
                txt=txt[:-1]+"/>"
        else:
            if self._kw=={}:
                txt="<"+self._name+">"
            else:
                txt="<"+self._name
                for i in self._kw.keys():
                    txt+=" "+str(i)+"="+str(self._kw[i])+" "
                txt=txt[:-1]+">"
            for arg in self._args:
                txt+=str(arg)
            txt+="</"+self._name+">"
        return txt

    def __repr__(self):
        return str(self)

    def __getitem__(self,key):
        # Integer index -> child; string key -> attribute value.
        if type(key)==type(0):
            return self._args[key]
        elif type(key)==type(""):
            return self._kw[key]

    def __setitem__(self,key,value):
        # Integer index -> replace (or insert) child; other keys -> attribute.
        if type(key)==type(0):
            if key<len(self._args):
                self._args[key]=value
            else:
                self._args.insert(key,value)
        else:
            self._kw[key]=value

    def keys(self):
        # Attribute names.
        return self._kw.keys()

    def tags(self):
        # Names of all child *tags*; plain-text children lack ._name and are
        # skipped by the bare except.
        lst=[]
        for i in range(len(self)):
            try:
                lst.append(self[i]._name)
            except:
                pass
        return lst

    def get_tag_by_name(self,strg):
        # All child tags named `strg`; a single match is unwrapped from the
        # list, no match yields [].
        lst=[]
        for i in range(len(self)):
            try:
                if self[i]._name==strg:
                    lst.append(self[i])
            except:
                pass
        if len(lst)==1:
            return lst[0]
        else:
            return lst

    def __getattr__(self,key):
        # Attribute access falls back to child-tag lookup: tt.a == tag 'a'.
        # (Python 2 syntax: `raise AttributeError, msg` and dict.has_key.)
        try:
            return self.get_tag_by_name(key)
        except:
            if self.__dict__.has_key(key):
                return self.__dict__[key]
            else:
                raise AttributeError, "Name does not exist '%s.'" % (key)

    def append(self,val):
        # Add a child (tag or text).
        self._args.append(val)
def xml2code(instr):
    """Translate an XML string into python source that builds a T-tree.

    Tag boundaries are rewritten into bracketed marker tokens, then the
    token stream is converted into nested ``T(name, [children...])``
    constructor calls, wrapped in a synthetic 'root' tag.
    """
    # Literal brackets would collide with the marker syntax below, so they
    # are encoded as pseudo-tags first; quotes are normalised to single.
    data=instr.replace("[","<lbracket/>").replace("]","<rbracket/>")
    data=data.replace("\n","").replace('"',"'")
    # Make <?...?> and <!--...--> look like self-closing tags.
    data=data.replace("?>","?/>").replace("-->","--/>")
    # Mark the three tag kinds: end </x>, empty <x/>, start <x>.
    # Order matters: "</" and "/>" must be handled before "<" and ">".
    data=data.replace("</","[]endtag[").replace("/>","[]mptytag[")
    data=data.replace("<","[]starttag[").replace(">","[]closetag[")
    data=data.split("[")
    outstr=''
    i=-1
    lendata=len(data)
    while i<lendata-1:
        i+=1
        x=data[i]
        x=x.strip()
        if len(x)==0:
            continue
        if x[0]=="]":
            # Marker token: "]s..." starts a tag, "]e..." ends one.
            if x[1]=="s":
                outstr+='T("'+data[i+1]+'",['
                i=i+2
                # A start marker immediately followed by the empty-tag
                # marker is a self-closing tag: close it right away.
                if data[i][0:2]=="]m":
                    outstr+=']),'
            elif x[1]=="e":
                outstr+=']),'
                i=i+2
        else:
            # Plain text between tags becomes a quoted child string.
            outstr+='"'+x+'",'
    outstr="T('root',["+outstr+"])"
    # Strip trailing commas before closing parens.
    outstr=outstr.replace(",)",")")
    return outstr
def xml(strg):
    # Evaluate the generated python and unwrap the synthetic 'root' tag.
    # SECURITY NOTE(review): eval() on translated input — never feed
    # untrusted XML through this parser.
    return eval(xml2code(strg))[0]
print "parsing xml:"
data="""<a><a b='a'><b a='b'/></a><b>/a</b>b<a/><a/></a>"""
print "xml string:"
print data
tt=xml(data)
print "print:"
print tt
print "print tags:"
print tt.tags()
print "get tag 'a':"
print tt.a
print "generating html:"
html=xml("<html><head/><body/></html>")
html.body.append("Hello World from jython")
html.head['title']="Hello World"
print html
print ""
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/476225_very_poor_mans_xml_parser/recipe-476225.py",
"copies": "1",
"size": "4115",
"license": "mit",
"hash": -6656297988899784000,
"line_mean": 26.8040540541,
"line_max": 73,
"alpha_frac": 0.4505467801,
"autogenerated": false,
"ratio": 3.538263112639725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9372199410770858,
"avg_score": 0.023322096393773174,
"num_lines": 148
} |
"""A quick demo of oscillation, stimulation, and background noise."""
import numpy as np
from fakespikes import neurons, util, rates
import seaborn as sns
import matplotlib.pyplot as plt
sns.__file__ # pylint
plt.ion()  # interactive plotting: figures show without blocking

# -- USER SETTINGS -----------------------------------------------------------
seed = 42
n = 50  # neuron number
t = 5  # simulation length in seconds (stale comment previously said 10)
Istim = 2  # Avg rate of 'natural' stimulation
Sstim = 0.1 * Istim  # Avg st dev of natural firing
Iosc = 10  # Avg rate of the oscillation
f = 1  # Freq of oscillation
Iback = 10  # Avg rate of the background noise

# Timing
dt = 0.001
rate = 1 / dt  # sample rate implied by the time step

# -- SIM ---------------------------------------------------------------------
# Init spikers
nrns = neurons.Spikes(n, t, dt=dt, seed=42)
times = nrns.times  # brevity

# Create biases (time-varying rate profiles for each condition)
osc = rates.osc(times, Iosc, f)
stim = rates.stim(times, Istim, Sstim, seed)
noise = rates.constant(times, Iback)

# Simulate spiking: Poisson draws from each rate profile
spks_osc = nrns.poisson(osc)
spks_stim = nrns.poisson(stim)
spks_noise = nrns.poisson(noise)

# Reformat and plot a raster
spks = np.hstack([spks_osc, spks_stim, spks_noise])  # Stack 'em for plotting
ns, ts = util.to_spiketimes(times, spks)
plt.plot(ts, ns, 'o')

# and their summed (population) rates, one subplot per condition
plt.figure()
plt.subplot(311)
plt.plot(times, spks_osc.sum(1), label='osc')
plt.legend()
plt.subplot(312)
plt.plot(times, spks_stim.sum(1), label='stim')
plt.legend()
plt.subplot(313)
plt.plot(times, spks_noise.sum(1), label='background')
plt.legend()
plt.xlabel("Time (s)")
plt.ylabel("Rate")
| {
"repo_name": "voytekresearch/fakespikes",
"path": "fakespikes/examples/demo.py",
"copies": "1",
"size": "1517",
"license": "mit",
"hash": -4561869655559814700,
"line_mean": 24.7118644068,
"line_max": 78,
"alpha_frac": 0.6367831246,
"autogenerated": false,
"ratio": 2.878557874762808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4015340999362808,
"avg_score": null,
"num_lines": null
} |
'''A quick demo of the factorization machine layer.'''
from __future__ import print_function
import numpy as np
import time
import torch
import torch.nn.functional as F
from fmpytorch.second_order.fm import FactorizationMachine
from torch.autograd import Variable
N_BATCH = 10000
INPUT_SIZE = 100
HIDDEN_SIZE = 100
N_FACTORS_FM = 5
BATCH_SIZE = 16
class MyModel(torch.nn.Module):
    """Linear -> Dropout(0.5) -> FactorizationMachine regression head."""

    def __init__(self):
        super(MyModel, self).__init__()
        self.linear = torch.nn.Linear(INPUT_SIZE, HIDDEN_SIZE)
        self.dropout = torch.nn.Dropout(.5)
        self.fm = FactorizationMachine(HIDDEN_SIZE, 5)

    def forward(self, x):
        # Project, regularize, then score with the FM layer.
        hidden = self.linear(x)
        hidden = self.dropout(hidden)
        return self.fm(hidden)
# Reproducibility: seed numpy (synthetic data) and torch (weights, dropout).
np.random.seed(1)
torch.manual_seed(1)

model = MyModel()
opt = torch.optim.Adam(model.parameters(), lr=.01)
model.train()  # training mode so Dropout is active
def true_function(input):
    '''A dummy function to learn'''
    # Target is simply the row-wise sum of the input features.
    return np.asarray(input).sum(axis=1)
# Fit the model on freshly sampled batches and time the whole run.
start = time.time()
n_batches = 10000
for batch in range(n_batches):
    cur_x = np.random.random(size=(BATCH_SIZE, INPUT_SIZE)).astype(np.float32)
    cur_y = true_function(cur_x)
    cur_x, cur_y = Variable(torch.from_numpy(cur_x)), Variable(torch.from_numpy(cur_y))
    opt.zero_grad()
    out = model(cur_x)
    loss = F.mse_loss(out, cur_y)
    loss.backward()
    opt.step()
    print(batch, loss)
end = time.time()
elapsed = end-start
# FIX: average over the actual batch count (the loop runs 10000 batches,
# but the original divided elapsed by a hard-coded 100).
print("{:.3f}ms per batch".format(elapsed/n_batches * 1000))
| {
"repo_name": "jmhessel/fmpytorch",
"path": "fmpytorch/examples/toy.py",
"copies": "1",
"size": "1432",
"license": "mit",
"hash": -3661230137820016600,
"line_mean": 23.2711864407,
"line_max": 87,
"alpha_frac": 0.6585195531,
"autogenerated": false,
"ratio": 3.072961373390558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4231480926490558,
"avg_score": null,
"num_lines": null
} |
"""A quick example for training a part-of-speech tagger, without worrying
about the tokenization, or other language-specific customizations."""
from __future__ import unicode_literals
from __future__ import print_function
import plac
from os import path
import os
from spacy.vocab import Vocab
from spacy.tokenizer import Tokenizer
from spacy.tagger import Tagger
import random
# You need to define a mapping from your data's part-of-speech tag names to the
# Universal Part-of-Speech tag set, as spaCy includes an enum of these tags.
# See here for the Universal Tag Set:
# http://universaldependencies.github.io/docs/u/pos/index.html
# You may also specify morphological features for your tags, from the universal
# scheme.
TAG_MAP = {
'N': {"pos": "NOUN"},
'V': {"pos": "VERB"},
'J': {"pos": "ADJ"}
}
# Usually you'll read this in, of course. Data formats vary.
# Ensure your strings are unicode.
DATA = [
(
["I", "like", "green", "eggs"],
["N", "V", "J", "N"]
),
(
["Eat", "blue", "ham"],
["V", "J", "N"]
)
]
def ensure_dir(*parts):
    """Join *parts* into a path, create that directory if missing, return it.

    Note: only the leaf directory is created (os.mkdir), so parents must
    already exist — callers build the tree one level at a time.
    """
    target = path.join(*parts)
    if not path.exists(target):
        os.mkdir(target)
    return target
def main(output_dir):
    # Lay out the on-disk structure the trained model will be saved into.
    ensure_dir(output_dir)
    ensure_dir(output_dir, "pos")
    ensure_dir(output_dir, "vocab")

    vocab = Vocab(tag_map=TAG_MAP)
    tokenizer = Tokenizer(vocab, {}, None, None, None)
    # The default_templates argument is where features are specified. See
    # spacy/tagger.pyx for the defaults.
    tagger = Tagger.blank(vocab, Tagger.default_templates())

    # Five epochs over the toy corpus, reshuffling between epochs.
    for i in range(5):
        for words, tags in DATA:
            tokens = tokenizer.tokens_from_list(words)
            tagger.train(tokens, tags)
        random.shuffle(DATA)
    # Persist the model weights and the string store.
    tagger.model.end_training(path.join(output_dir, 'pos', 'model'))
    vocab.strings.dump(path.join(output_dir, 'vocab', 'strings.txt'))
if __name__ == '__main__':
    # plac maps main()'s signature onto command-line arguments.
    plac.call(main)
| {
"repo_name": "rebeling/spaCy",
"path": "examples/train_pos_tagger.py",
"copies": "4",
"size": "1998",
"license": "mit",
"hash": -580316464888538200,
"line_mean": 27.5428571429,
"line_max": 79,
"alpha_frac": 0.6346346346,
"autogenerated": false,
"ratio": 3.3807106598984773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006309523809523809,
"num_lines": 70
} |
# A quick example with seaborn visualization library
# (notebook export: the !pip lines are IPython magics, not plain Python)
import sense
# !pip install --upgrade pip
!pip install seaborn
import seaborn as sns
sns.set(style="ticks")
# Anscombe's quartet: four datasets with near-identical summary statistics.
df = sns.load_dataset("anscombe")
sns.lmplot("x", "y", col="dataset", hue="dataset", data=df,
           col_wrap=2, ci=None, palette="bright", size=3,
           scatter_kws={"s": 60, "alpha": .4})
## Bokeh
## Example from [Bokeh Gallery](http://bokeh.pydata.org/en/latest/docs/gallery/burtin.html)
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
!pip install bokeh
from bokeh.plotting import figure, show, output_notebook

# Burtin's antibiotics dataset: minimum inhibitory concentration (MIC) of
# three drugs against sixteen bacteria, plus gram staining.
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""

drug_color = OrderedDict([
    ("Penicillin", "#0d3362"),
    ("Streptomycin", "#c64737"),
    ("Neomycin", "black" ),
])
gram_color = {
    "positive" : "#aeaeb8",
    "negative" : "#e69584",
}

df = pd.read_csv(StringIO(antibiotics),
                 skiprows=1,
                 skipinitialspace=True,
                 engine='python')

width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10

# Map MIC values (0.001 .. 1000) onto the radial band on a sqrt-log scale.
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr

def rad(mic):
    # Radius for a given MIC value (smaller MIC -> longer wedge).
    return a * np.sqrt(np.log(mic * 1E4)) + b

big_angle = 2.0 * np.pi / (len(df) + 1)  # one sector per bacterium + legend gap
small_angle = big_angle / 7              # sub-wedges for the three drugs

x = np.zeros(len(df))
y = np.zeros(len(df))

output_notebook()
# Blank square canvas with no axes; everything is drawn in data coordinates.
p = figure(plot_width=width, plot_height=height, title="",
           x_axis_type=None, y_axis_type=None,
           x_range=[-420, 420], y_range=[-420, 420],
           min_border=0, outline_line_color="black",
           background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)

# annular wedges: one big sector per bacterium, colored by gram stain
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
    x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)

# small wedges: per-drug MIC bars inside each sector
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
                -big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
                color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
                -big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
                color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
                -big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
                color=drug_color['Neomycin'])

# circular axes and labels (decade rings from 0.001 to 1000)
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
       text_font_size="8pt", text_align="center", text_baseline="middle")

# radial axes separating the sectors
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
                -big_angle+angles, -big_angle+angles, color="black")

# bacteria labels around the outside
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
       text_font_size="9pt", text_align="center", text_baseline="middle")

# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
       text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
       color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
       text_font_size="9pt", text_align="left", text_baseline="middle")

p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)

# --- minimal line-chart example ---------------------------------------------
from bokeh.plotting import figure, output_notebook, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# render inline in the notebook (not a static HTML file)
output_notebook()
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p) | {
"repo_name": "ryanswanstrom/Sense.io-Projects",
"path": "python-visualization/analysis.py",
"copies": "1",
"size": "5637",
"license": "mit",
"hash": -2876461299429224400,
"line_mean": 33.5889570552,
"line_max": 91,
"alpha_frac": 0.6109632783,
"autogenerated": false,
"ratio": 2.7444011684518013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8820680602683959,
"avg_score": 0.0069367688135683805,
"num_lines": 163
} |
# A quickie trial integration of geiger counter parts
import machine
import micropython
import network
import ntptime
from g1 import Geiger, GLog, Gwsgi, GReportPeriodically
from gu import update_bssids
from ws2 import WS
class Thing:
    """Empty attribute bag, used as the mutable global namespace ``g``."""
micropython.alloc_emergency_exception_buf(100)
g = Thing()
def main():
    # Sync the RTC from NTP before any readings get timestamped.
    ntptime.settime()
    g.uid = machine.unique_id()
    g.wlan = network.WLAN()
    update_bssids(g)
    # Wire the pipeline: counter -> log -> periodic reporter, plus a WSGI app
    # served by the small web server.
    geiger = Geiger()
    glog = GLog(geiger)
    grep = GReportPeriodically(g, glog, host='put.into.com')
    gw = Gwsgi(glog)
    ws = WS(gw.wsgi_app)
    print("ready to start")
    geiger.start()
    glog.start()
    grep.start()
    ws.start()
    #ws.verbose = True
    try:
        # Serve requests one at a time; 10 is the per-request timeout
        # (units not shown here — presumably seconds; TODO confirm in WS).
        while True:
            ws.handle_one(10)
    except Exception as e:
        # Any error (including Ctrl-C on MicroPython REPL) tears the
        # server down; print it and stop cleanly.
        print(e)
        ws.stop()

if __name__ == '__main__':
    main()
"""
httpd = make_server('', 8000, demo_app)
print("Serving HTTP on port 8000...")
# Respond to requests until process is killed
httpd.serve_forever()
# Alternative: serve one request, then exit
httpd.handle_request()
"""
| {
"repo_name": "pramasoul/ESP-geiger",
"path": "t2wsgi.py",
"copies": "1",
"size": "1079",
"license": "mit",
"hash": -8882913943300041000,
"line_mean": 19.358490566,
"line_max": 60,
"alpha_frac": 0.6478220575,
"autogenerated": false,
"ratio": 3.1005747126436782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9223675232039179,
"avg_score": 0.004944307620899875,
"num_lines": 53
} |
# A quick python routine to display formating of Tracker.dump() for visual inspection
import gc
import copy
import pytracker as pyt
from StringIO import StringIO
import pickle
#gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_INSTANCES | gc.DEBUG_OBJECTS | gc.DEBUG_LEAK | gc.DEBUG_SAVEALL )
# One process-wide tracker; Trackable instances register with it.
t = pyt.Tracker()
pyt.set_global_tracker(t)
class foo(pyt.Trackable):
    """Tracked subclass with a normal instance __dict__."""
    def __init__(self, msg):
        self.msg = "message is: " + str(msg)
        # The data bundle should survive pickling and deep-copying.
        self._set_data_bundle( ('bundle', msg, str(self)) )
class foos(pyt.Trackable):
    """Tracked subclass using __slots__, so pickling needs explicit state."""
    __slots__ = ['msg']
    def __init__(self, msg):
        self.msg = "message is: " + str(msg)
        self._set_data_bundle( ('bundle', msg, str(self)) )
    def __getstate__(self):
        # Combine our slot state with the Trackable base state.
        return (self.msg, super(foos, self).__getstate__())
    def __setstate__(self, arg):
        self.msg = arg[0]
        super(foos, self).__setstate__(arg[1])
# Enable tracking for each class exercised below.
t.set_object_tracking(foo)
t.set_object_tracking(foos)
t.set_object_tracking(pyt.Trackable)
def pickled(obj):
    # Serialize `obj` and return the raw pickle bytes (a py2 str).
    sink = StringIO()
    pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
    return sink.getvalue()
def unpickle(buf):
    # Rebuild the object from a pickle string produced by pickled().
    source = StringIO(buf)
    return pickle.load(source)
print "\nTesting normal derived object\n"
x = foo('foo object')
print "data bundle before: " + str(x._get_data_bundle())
print "foo before: " + str(x)
print "data bundle before: " + str(x._get_data_bundle())
print "msg before: " + x.msg
t.dump()
px = pickled(x)
x = None
t.dump()
x = unpickle(px)
y = copy.deepcopy(x)
print "foo after: " + str(x)
print "data bundle after: " + str(x._get_data_bundle())
print "msg after: " + x.msg
t.dump()
x = None
t.dump()
y = None
t.dump()
print "\nTesting bare trackable\n"
x = pyt.Trackable()
x._set_data_bundle('hardcoded bundle')
print "data bundle before: " + str(x._get_data_bundle())
print "bar before: " + str(x)
print "data bundle before: " + str(x._get_data_bundle())
t.dump()
px = pickled(x)
x = None
t.dump()
x = unpickle(px)
y = copy.deepcopy(x)
print "bar after: " + str(x)
print "data bundle after: " + str(x._get_data_bundle())
t.dump()
x = None
t.dump()
y = None
t.dump()
print "\nTesting subclass with slots\n"
x = foos('foos object')
print "data bundle before: " + str(x._get_data_bundle())
print "foos before: " + str(x)
print "data bundle before: " + str(x._get_data_bundle())
print "msg before: " + x.msg
t.dump()
px = pickled(x)
x = None
t.dump()
x = unpickle(px)
y = copy.deepcopy(x)
print "foos after: " + str(x)
print "data bundle after: " + str(x._get_data_bundle())
print "msg after: " + x.msg
t.dump()
x = None
t.dump()
y = None
t.dump()
| {
"repo_name": "garywiz/pytracker",
"path": "pyt_tests/copytests.py",
"copies": "1",
"size": "2599",
"license": "bsd-3-clause",
"hash": -755561269652705000,
"line_mean": 22.8440366972,
"line_max": 153,
"alpha_frac": 0.6444786456,
"autogenerated": false,
"ratio": 2.779679144385027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39241577899850266,
"avg_score": null,
"num_lines": null
} |
# A quick runthrough of writing a client for Messidge
# (c) 2017 David Preece, this work is in the public domain
import logging
import time
from messidge.client.connection import Connection, cmd
from messidge import default_location
class Controller:
    """Handles asynchronous messages pushed to us by the broker."""

    def __init__(self):
        # Node public keys most recently offered by the broker.
        self.nodes = []

    def _resource_offer(self, msg):
        # The broker announces which nodes we may send work to.
        self.nodes = msg.params['nodes']

    # Message name -> validator listing the params the handler requires.
    commands = {b'resource_offer': cmd(['nodes'])}
logging.basicConfig(level=logging.DEBUG)

# takes its server address, pk and sk from the configuration in (default) ~/.messidge
conn = Connection(default_location())
controller = Controller()
conn.register_commands(controller, Controller.commands)
conn.start().wait_until_ready()

# an asynchronous command
conn.send_cmd(b'write_note', {'note': time.ctime(time.time())})

# synchronous, but guaranteed to not be called before 'write_note' has been processed by the broker
reply = conn.send_blocking_cmd(b'fetch_notes')
print("Here are the notes: " + str(reply.params['notes']))

# asynchronous via callback
# note that the callback is called by the background (loop) thread
def async_callback(msg):
    print("Async callback: " + str(msg.params['notes']))
conn.send_cmd(b'fetch_notes', reply_callback=async_callback)
print("This will print before the async callback is triggered...")

# an exception is raised from a blocking call
try:
    conn.send_blocking_cmd(b'raise_exception')
except ValueError as e:
    print("Expected! ValueError raised because: " + str(e))

# get the nodes to do something for us by passing their public key
# (param key 'devisor' is the server-side API spelling — do not "fix" it here)
for node_pk in controller.nodes:
    reply = conn.send_blocking_cmd(b'divide', {'node': node_pk, 'dividend': 10.0, 'devisor': 5.0})
    print("10.0/5.0=" + str(reply.params['quotient']))
    try:
        conn.send_blocking_cmd(b'divide', {'node': node_pk, 'dividend': 10.0, 'devisor': 0.0})
    except ValueError as e:
        print("Expected! ValueError raised because: " + str(e))

conn.disconnect()
| {
"repo_name": "RantyDave/messidge",
"path": "demo/client.py",
"copies": "1",
"size": "1973",
"license": "bsd-2-clause",
"hash": -6206784185058825000,
"line_mean": 33.0172413793,
"line_max": 99,
"alpha_frac": 0.7090724785,
"autogenerated": false,
"ratio": 3.49822695035461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.969880675459711,
"avg_score": 0.0016985348514999624,
"num_lines": 58
} |
# A quick script to analyze metabolite variability as a function of regulation strength
import os, sys, numpy as np, scipy as sp, pandas as pd, pdb, matplotlib.pyplot as plt, scipy.stats as st

# Read in the interactions and metabolite concentrations
reg = pd.read_csv('../res/ecoli_interactions.csv', header=0, index_col=0)
conc = pd.read_csv('../data/ecoli_metabolites_kochanowski2017.csv', header=0, index_col=0)

# NOTE: the deprecated .ix indexer was removed in pandas 1.0 -- use .iloc for
# positional access and .loc for label access instead.
# Keep the 13 concentration columns / the two annotation columns of interest.
conc = conc.iloc[:, 1:14]
reg = reg.loc[:, ['bigg.metabolite', 'EC_number']].drop_duplicates()

# Number of distinct (metabolite, enzyme) regulatory edges per metabolite
mcounts = reg.groupby('bigg.metabolite').count()

# Temporal variability: coefficient of variation across conditions
mvar = conc.std(axis=1) / conc.mean(axis=1)

# Strip compartment suffixes so the two indices can be matched
mvar.index = [item.split('_')[0] for item in mvar.index]
mcounts.index = [item.split('_')[0] if ('_d' not in item and '_t' not in item) else item for item in mcounts.index]

# Metabolites present in both tables; sorted for a deterministic ordering
# (.loc with a raw set is deprecated/ambiguous in modern pandas)
ixmets = sorted(set(mvar.index).intersection(mcounts.index))

df = pd.DataFrame()
df['Regulation'] = mcounts.loc[ixmets].iloc[:, 0]
df['Variability'] = mvar.loc[ixmets]

plt.ion()
plt.figure(1)
plt.plot(mvar.loc[ixmets], mcounts.loc[ixmets].iloc[:, 0], 'o')
plt.xlabel('Temporal Variation, std/mean')
plt.ylabel('Number of Regulatory Edges')
print(st.spearmanr(mvar.loc[ixmets], mcounts.loc[ixmets].iloc[:, 0]))
"repo_name": "eladnoor/small-molecule-regulation",
"path": "python/metvariability.py",
"copies": "1",
"size": "1148",
"license": "mit",
"hash": 8751724131602398000,
"line_mean": 40.0357142857,
"line_max": 115,
"alpha_frac": 0.7142857143,
"autogenerated": false,
"ratio": 2.651270207852194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3865555922152194,
"avg_score": null,
"num_lines": null
} |
# A quick script to convert from the NE demo format
# : [194-200:PERSON@-Infinity, 212-225:PERSON@-Infinity, 346-363:PERSON@-Infinity, 447-453:ORGANIZATION@-Infinity, 643-649:PERSON@-Infinity, 654-664:LOCATION@-Infinity, 742-753:PERSON@-Infinity, 802-841:ORGANIZATION@-Infinity, 848-853:PERSON@-Infinity, 880-889:LOCATION@-Infinity, 934-940:LOCATION@-Infinity, 1019-1023:ORGANIZATION@-Infinity, 1096-1106:ORGANIZATION@-Infinity, 1234-1240:ORGANIZATION@-Infinity, 1250-1256:PERSON@-Infinity, 1367-1377:PERSON@-Infinity, 1521-1527:PERSON@-Infinity, 1732-1738:PERSON@-Infinity, 1882-1890:LOCATION@-Infinity]
# to an annotated file.
# The format I established in RunFileChunker was
# Working on <filename>
# then search for " : " at the beginning of a line.
#
# Guts
#
import re, os, sys
# Matches one span entry of the form "<start>-<end>:<TAG>@..." (the score
# after '@' is ignored).
ENTRY_PAT = re.compile("(\d+)-(\d+):(.+?)@")
# We need to tokenize, and then align.
def createAnnotatedDocument(taskImpl, path, docString):
# Build a MAT document for `path`, overlay the NE spans parsed out of
# `docString`, align them to token boundaries, and write <path>.json.
jsonIO = MAT.DocumentIO.getDocumentIO('mat-json')
d = jsonIO.readFromSource(path, task = taskImpl)
# This isn't really right.
aTypes = {}
# NOTE(review): `tagTable` is not defined anywhere in this file -- presumably
# a module-level table of tag names; as written this line raises NameError.
# Confirm where tagTable is meant to come from.
for key in tagTable.keys():
aTypes[key.lower()] = d.findAnnotationType(key)
taskImpl.getStep("Demo", "zone").do(d)
entries = docString.split(", ")
for e in entries:
m = ENTRY_PAT.match(e)
if m is not None:
start = m.group(1)
end = m.group(2)
tag = m.group(3)
d.createAnnotation(int(start), int(end), aTypes[tag.lower()])
else:
# Python 2 print statement -- this script predates Python 3.
print "Don't get record", e
d.adjustTagsToTokens(taskImpl)
d.setStepsDone(["zone", "tag"])
outPath = os.path.join(os.path.dirname(path), os.path.basename(path) + ".json")
print "Writing", outPath
jsonIO.writeToTarget(d, outPath)
#
# Toplevel
#
if len(sys.argv) != 3:
print "Usage: lingpipe_to_mat.py mat_home lingpipe_output"
sys.exit(1)
[MAT_HOME, LP_OUTPUT] = sys.argv[1:]
# MAT ships its python package inside the MAT_HOME tree, so it must be put on
# sys.path before the MAT imports below.
sys.path.insert(0, os.path.join(MAT_HOME, "lib", "mat", "python"))
import MAT.Document
import MAT.PluginMgr
fp = open(LP_OUTPUT, "r")
s = fp.read()
fp.close()
plugins = MAT.PluginMgr.LoadPlugins()
task = plugins["Named Entity"]
taskImpl = task.getTaskImplementation('Demo', [])
# " : [ ... ]" line holding the span list for one document
DOC_PAT = re.compile("^ : [[](.*)[]]$", re.M)
# "Working on <filename>" line opening each document section
START_PAT = re.compile("^Working on (.*)", re.M)
i = 0
# Walk the LingPipe output: for each "Working on" header, grab the following
# span list and convert that one document.
while i < len(s):
m = START_PAT.search(s, i)
if m is None:
break
path = m.group(1)
m = DOC_PAT.search(s, m.end())
if m is None:
break
docString = m.group(1)
createAnnotatedDocument(taskImpl, path, docString)
i = m.end()
| {
"repo_name": "VHAINNOVATIONS/DmD",
"path": "scrubber/MIST_2_0_4/src/MAT/sample/ne/utils/lingpipe_to_mat.py",
"copies": "1",
"size": "2597",
"license": "apache-2.0",
"hash": 7913041541835738000,
"line_mean": 29.1976744186,
"line_max": 552,
"alpha_frac": 0.6461301502,
"autogenerated": false,
"ratio": 2.8855555555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9012543957562648,
"avg_score": 0.00382834963858162,
"num_lines": 86
} |
## A quick script to fix the ridiculous format of the raw FAA download data (at least it's available though!)
import csv

# MASTER.txt is from https://www.faa.gov/licenses_certificates/aircraft_certification/aircraft_registry/releasable_aircraft_download/
with open("MASTER.txt") as source, \
        open("/temp/MASTER_CLEANED.csv", "w", newline='') as cleaned:
    reader = csv.reader(source, delimiter=",")
    writer = csv.writer(cleaned)
    for record in reader:
        # the raw data carries trailing spaces/tabs on many fields
        stripped = [field.strip() for field in record]
        # normalise the Mode-S hex code (column 33) to lowercase
        stripped[33] = stripped[33].lower()
        # every row (header included) ends with a spurious trailing comma, so
        # drop the resulting empty final field, then write the clean CSV that
        # is later imported into Postgres
        writer.writerow(stripped[:-1])
| {
"repo_name": "GISDev01/adsbpostgis",
"path": "externaldata/faaopendata/faa_data_cleaner.py",
"copies": "1",
"size": "1094",
"license": "apache-2.0",
"hash": -7624592556230690000,
"line_mean": 53.7,
"line_max": 133,
"alpha_frac": 0.6718464351,
"autogenerated": false,
"ratio": 3.695945945945946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48677923810459456,
"avg_score": null,
"num_lines": null
} |
"""A quick script to format blog posts in Jekyll style
Note that the files created by this script are different from
the ones currently in posts/. They've been edited to avoid
duplicates, lost file errors, and consistent styling"""
import csv
from datetime import datetime
CSV_FILE = 'blog_entries.csv'


class post():
    """A convenient interface for post data."""

    # Column indices of the relevant fields in the exported CSV
    CONTENT = 1
    CREATION_DATE = 2
    SLUG = 12
    TAGS = 15
    TITLE = 17

    def __init__(self, row):
        """Build a post from one CSV row (a list of column values)."""
        self.content = row[self.CONTENT]
        self.creation_date = datetime.strptime(row[self.CREATION_DATE], '%Y-%m-%d %H:%M:%S')
        self.slug = row[self.SLUG]
        self.tags = row[self.TAGS]
        self.title = row[self.TITLE]

    def __str__(self):
        """Returns the string form of the post data.

        Abridges the post content to 10 characters.
        """
        return str([self.content[:10], self.creation_date, self.slug, self.tags, self.title])

    def file_name(self):
        """Formats the file name to be read by Jekyll: YYYY-MM-DD-slug.html."""
        return datetime.strftime(self.creation_date, '%Y-%m-%d-') + self.slug + '.html'

    def file_content(self):
        """Return the post as Jekyll front matter followed by the content."""
        # Gray Matter
        result = '---\n'
        result += 'layout: post\n'
        result += 'title: "%s"\n' % self.title
        result += 'date: %s\n' % datetime.strftime(self.creation_date, '%Y-%m-%d %H:%M:%S')
        result += 'tags: [%s]\n' % self.tags
        result += 'author: %s\n' % 'PiE Team'
        result += '---\n\n'
        # Content
        result += self.content
        return result

    def write_file(self):
        """Write the formatted post to disk.

        Uses a context manager so the file is closed even if writing fails
        (the original opened/closed the handle manually).
        """
        with open(self.file_name(), 'w') as f:
            f.write(self.file_content())


if __name__ == "__main__":
    with open(CSV_FILE, 'r') as user_file:
        reader = csv.reader(user_file)
        # Skip the label line
        next(reader)
        post_list = [post(row) for row in reader]
    # NOTE: renamed the loop variable -- the original `for post in post_list`
    # shadowed the `post` class itself.
    for entry in post_list:
        entry.write_file()
| {
"repo_name": "pioneers/website",
"path": "_csv_scraper/converter.py",
"copies": "2",
"size": "2082",
"license": "apache-2.0",
"hash": -4248747632466003500,
"line_mean": 27.1351351351,
"line_max": 93,
"alpha_frac": 0.58117195,
"autogenerated": false,
"ratio": 3.5834767641996557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164648714199656,
"avg_score": null,
"num_lines": null
} |
"""A quick script to generate maven compatible pom.xml file from given build
rule."""
from xml.dom import minidom
import argparse
import json
import logging
import os
import xml.etree.ElementTree as ET
import mool.core_cmds as cc
import mool.shared_utils as su
DEFAULT_POM_FILE = 'pom.xml'
class Error(su.Error):
"""Error class for this module."""
# NOTE(review): su.Error appears to be the shared mool error base, so existing
# `except su.Error` handlers also catch this -- confirm against shared_utils.
def _parse_command_line(program_name, cmd_line):
    """Build the argparse parser for the pom generator and parse cmd_line."""
    parser = argparse.ArgumentParser(prog=program_name)
    parser.add_argument('-g', '--group', type=str, default='',
                        help='group id, example org.apache.hadoop')
    parser.add_argument('-a', '--artifact', type=str, default='',
                        help='artifact id, example hadoop-client')
    parser.add_argument('-v', '--version', type=str, default='',
                        help='version number, example 1.2.3')
    parser.add_argument('-r', '--build_rule', type=str, required=True,
                        help='java build rule to generate pom from')
    # default output location: ./pom.xml, resolved to an absolute path
    default_pom = os.path.abspath(os.path.join('.', DEFAULT_POM_FILE))
    parser.add_argument('-o', '--pom_path', type=str, default=default_pom,
                        help='full path to output pom.xml file')
    return parser.parse_args(cmd_line)
def _validate_helper(dict_obj, key, alternative):
"""Validate that one of the given parameters is valid. Print a pretty
message on failure."""
if key not in dict_obj and not alternative:
raise Error('Unable to determine value of {}!'.format(key))
return alternative or dict_obj[key]
def _get_dependency_elem(dep_dict):
"""Returns a dependency element from given dictionary."""
elem = ET.Element('dependency')
# TODO: Move these constants to some common file.
keys = ['groupId', 'artifactId', 'version', 'classifier', 'scope']
for key in keys:
if dep_dict[key]:
elem.append(_get_data_element(key, dep_dict[key]))
return elem
def _get_data_element(tag, data):
"""Returns an element with given tag and text body."""
elem = ET.Element(tag)
elem.text = data
return elem
def _get_repositories_element(repo_set):
"""Returns the set of repositories formatted in pom format."""
repositories = ET.Element('repositories')
counter = 0
for repo in repo_set:
counter = counter + 1
elem = ET.Element('repository')
elem.append(_get_data_element('id', 'repo{}'.format(counter)))
elem.append(_get_data_element('name', 'Repository {}'.format(counter)))
elem.append(_get_data_element('layout', 'default'))
elem.append(_get_data_element('url', repo))
repositories.append(elem)
return repositories
def _generate_pom_file(args, deps_file):
"""Generate pom file from given JSON file having all the dependency
details."""
with open(deps_file, 'r') as deps_file_obj:
data = json.load(deps_file_obj)
# Collect project specific information.
# Command-line values (args.*) take precedence over the JSON fields; missing
# both raises Error via _validate_helper.
group_id = _validate_helper(data, 'groupId', args.group)
artifact_id = _validate_helper(data, 'artifactId', args.artifact)
version = _validate_helper(data, 'version', args.version)
root = ET.Element('project', xmlns='http://maven.apache.org/POM/4.0.0')
root.append(_get_data_element('modelVersion', '4.0.0'))
root.append(_get_data_element('groupId', group_id))
root.append(_get_data_element('artifactId', artifact_id))
root.append(_get_data_element('version', version))
root.append(_get_data_element('packaging', 'jar'))
dependencies = ET.Element('dependencies')
# A set de-duplicates repository URLs shared by multiple dependencies.
repo_set = set()
for dep in data.get('deps', []):
dependencies.append(_get_dependency_elem(dep))
repo_set.add(dep['repoUrl'])
# Dump the xml to given file.
root.append(dependencies)
root.append(_get_repositories_element(repo_set))
xml_string = ET.tostring(root, encoding='utf-8', method='xml')
# minidom is used only to pretty-print the ElementTree serialization.
with open(args.pom_path, 'w') as pom_obj:
pom_obj.write(minidom.parseString(xml_string).toprettyxml(indent=' '))
def main(program_name, cmd_line):
    """Main function to trigger rule building and its pom generation.

    Returns a (status_code, message) tuple: (0, msg) on success and
    (1, msg) when the build rule could not be built.
    """
    args = _parse_command_line(program_name, cmd_line)
    ret_code, builder = cc.generic_core_cmd_handler(
        [cc.BUILD_COMMAND, args.build_rule], {})
    if ret_code != 0:
        # BUG FIX: the original `msg = 'Error building rule %s!', args.build_rule`
        # assigned a (format, arg) tuple to msg, so both the log line and the
        # returned message were tuples instead of formatted strings.
        msg = 'Error building rule {}!'.format(args.build_rule)
        logging.error(msg)
        return (1, msg)
    rule_details = builder.get_rules_map()[args.build_rule]
    deps_file_path = rule_details[su.EXPORTED_MVN_DEPS_FILE_KEY]
    _generate_pom_file(args, deps_file_path)
    msg = 'Successfully created {}.'.format(args.pom_path)
    logging.info(msg)
    return (0, msg)
| {
"repo_name": "rocketfuel/mool",
"path": "build_tool/bu.scripts/extensions/pom_builder.py",
"copies": "1",
"size": "4566",
"license": "bsd-3-clause",
"hash": -113515528979756880,
"line_mean": 35.8225806452,
"line_max": 76,
"alpha_frac": 0.6756460797,
"autogenerated": false,
"ratio": 3.459090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.957160106727047,
"avg_score": 0.01262718430408775,
"num_lines": 124
} |
# A quick test job to exercise the whole stack w/o rewriting this to be more
# general.
from __future__ import print_function
import os
import sys
import time
import requests
from pyspark import SparkContext, SQLContext
import pyspark.sql.functions as sql
import pyspark.sql.types as types
# Spark/ES setup: read iDigBio records matching the query into a DataFrame and
# dump three parquet snapshots (full / 100k / 1M) under out_dir.
sc = SparkContext(appName="iDigBioParquetMini")
sqlContext = SQLContext(sc)
out_dir = "/tmp"
out_fn_base = "idigbio-minitest"
# timestamp embedded in every output file name so runs don't collide
dataset_date = time.strftime("%Y%m%dT%H%M%S")
# Elasticsearch cluster nodes (comma-separated, as es-hadoop expects)
nodes = "c18node14.acis.ufl.edu,c18node2.acis.ufl.edu,c18node6.acis.ufl.edu,c18node10.acis.ufl.edu,c18node12.acis.ufl.edu"
index = "idigbio"
# restricts the test extract to genus Eucalyptus
query = '{"query": {"bool": {"must": [{"term":{"genus":"eucalyptus"}}]}}}'
# Get field list from API endpoint
meta_fields_records = (requests
.get("http://search.idigbio.org/v2/meta/fields/records")
.json()
)
# Keep every field the API marks with a "fieldName", including the nested
# "data.*" fields one level down.
field_set = set()
for k,v in meta_fields_records.items():
if v.get("fieldName", False):
field_set.add(k)
if k == "data":
for kd,vd in v.items():
if vd.get("fieldName", False):
field_set.add("data.{0}".format(kd))
# Remove known fields that cause problems
bad_field_set = set({'commonnames', 'flags', 'recordids', 'mediarecords'})
field_set -= bad_field_set
fields = ",".join(field_set)
# Read in dataframe
# .cache() because the same DataFrame is written out three times below.
df = (sqlContext.read.format("org.elasticsearch.spark.sql")
.option("es.read.field.include", fields)
.option("es.nodes", nodes)
.option("es.query", query)
.load("{0}/records".format(index))
.cache()
)
# Write out the whole thing
(df
.write
.mode("overwrite")
.parquet(os.path.join(out_dir,
"{0}-{1}.parquet".format(out_fn_base, dataset_date)))
)
# Write out a small 100k version for testing
(df
.limit(100 * 1000)
.write
.mode("overwrite")
.parquet(os.path.join(out_dir,
"{0}-{1}-100k.parquet".format(out_fn_base, dataset_date)))
)
# Write out a larger 1M version for testing
(df
.limit(1000 * 1000)
.write
.mode("overwrite")
.parquet(os.path.join(out_dir,
"{0}-{1}-1M.parquet".format(out_fn_base, dataset_date)))
)
| {
"repo_name": "bio-guoda/guoda-datasets",
"path": "iDigBio/es_load_job_mini.py",
"copies": "1",
"size": "2206",
"license": "mit",
"hash": 6489691687392886000,
"line_mean": 28.4133333333,
"line_max": 122,
"alpha_frac": 0.6223934723,
"autogenerated": false,
"ratio": 3.0810055865921786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.905156602997399,
"avg_score": 0.03036660578363777,
"num_lines": 75
} |
""" A quite inelegant and temporary way of working around Django's problem
with south migrations and custom user models.
This script replaces all the necessary part of migrations files to be more
generic.
INSTRUCTIONS
============
Replace variable "MIGRATIONS_LOCATION".
@@TD: detect by being in directory called 'migrations' rather than prefix of "0",
will perform marginally better.
@@TD: check all variable names are sane.
@@TD: better detection of duplicate settings imports
"""
import copy
import os
import re
DEBUG = True
MIGRATIONS_LOCATION = "/home/me/websites/django-project/"
# Regex matching south's frozen ORM dump of the stock auth.User model inside a
# migration file; it is replaced wholesale by the generic AUTH_USER_MODEL stub
# in frozen_replace below.
frozen_match = """ 'auth\.user': \{
'Meta': \{'object_name': 'User'\},
'date_joined': \('django\.db\.models\.fields\.DateTimeField', \[\], \{(.*)\}\),
'email': \('django\.db\.models\.fields\.EmailField', \[\], \{(.*)\}\),
'first_name': \('django\.db\.models\.fields\.CharField', \[\], \{(.*)\}\),
'groups': \('django\.db\.models\.fields\.related\.ManyToManyField', \[\], \{(.*)\}\),
'id': \('django\.db\.models\.fields\.AutoField', \[\], \{(.*)\}\),
'is_active': \('django\.db\.models\.fields\.BooleanField', \[\], \{(.*)\}\),
'is_staff': \('django\.db\.models\.fields\.BooleanField', \[\], \{(.*)\}\),
'is_superuser': \('django\.db\.models\.fields\.BooleanField', \[\], \{(.*)\}\),
'last_login': \('django\.db\.models\.fields\.DateTimeField', \[\], \{(.*)\}\),
'last_name': \('django\.db\.models\.fields\.CharField', \[\], \{(.*)\}\),
'password': \('django\.db\.models\.fields\.CharField', \[\], \{(.*)\}\),
'user_permissions': \('django\.db\.models\.fields\.related\.ManyToManyField', \[\], \{(.*)\}\),
'username': \('django\.db\.models\.fields\.CharField', \[\], \{(.*)\}\)
\},
"""
frozen_replace = """ settings.AUTH_USER_MODEL: {
'Meta': {'object_name': settings.AUTH_USER_MODEL.split('.')[-1]},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
"""
# Ensure `settings` is importable in the migration; the dupe pair below
# collapses the double import this can introduce.
model_match = """from django.db import models"""
model_replace = """from django.db import models
from django.conf import settings"""
model_dupe_match = """from django.conf import settings
from django.conf import settings"""
model_dupe_replace = """from django.conf import settings"""
# Replacments are a 3 tuple:
# (match, replace, method)
#
# `match` is the text to be replaced.
# `replace` is what it is replaced with.
# `method` is which method should be used the choices are: 're' or 'str'.
REPLACEMENTS = [(frozen_match, frozen_replace, 're'),
(model_match, model_replace, 'str'),
(model_dupe_match, model_dupe_replace, 'str'),
(''', 'to': "orm['auth.User']"''',
""", 'to': "orm['{0}']".format(settings.AUTH_USER_MODEL)""", 'str'),
("""to=orm['auth.User']""",
"""to=orm[settings.AUTH_USER_MODEL]""", 'str')]
# Walk the project tree; any file whose name starts with "0" is assumed to be
# a south migration (see the module docstring's TODO about detecting the
# 'migrations' directory instead).
# NOTE(review): `dir` and `file` shadow the builtins of the same names; this
# script also mixes Python 2 print statements with print() calls, so it only
# runs under Python 2.
for root, dir, files in os.walk(MIGRATIONS_LOCATION):
path = root.split('/')
if DEBUG: print (len(path) - 1) *'---' , os.path.basename(root)
for file in files:
if DEBUG: print len(path)*'---', file
if file[:1] == "0":
if DEBUG: print (len(path) - 1) *'---' , root
if DEBUG: print file
# Each replacement re-reads the file so the offsets used in the debug
# output match the current contents; the write below rewrites it in place.
for rep in REPLACEMENTS:
file_open = open(root+'/'+file, 'r')
file_contents = file_open.read()
file_open.flush()
file_open.close()
contents = copy.deepcopy(file_contents)
if rep[2] == 're':
matched = re.search(rep[0], file_contents)
if matched:
if DEBUG: print("Replacing: ")
if DEBUG: print(contents[matched.start():matched.start()+len(rep[0])])
contents_replace = re.sub(rep[0], rep[1], contents)
if DEBUG: print("With: ")
if DEBUG: print(contents_replace[matched.start():matched.start()+len(rep[1])+1])
file_write = open(root+'/'+file, 'w')
file_write.write(contents_replace)
file_write.flush()
file_write.close()
else:
if DEBUG: print('good')
if rep[2] == 'str':
start = contents.find(rep[0])
if start != -1:
if DEBUG: print("Replacing: ")
if DEBUG: print(contents[start:start+len(rep[0])])
contents_replace = contents.replace(rep[0], rep[1])
if DEBUG: print("With: ")
if DEBUG: print(contents_replace[start:start+len(rep[1])+1])
file_write = open(root+'/'+file, 'w')
file_write.write(contents_replace)
file_write.flush()
file_write.close()
else:
if DEBUG: print('good')
print('Migrations updated.')
| {
"repo_name": "elena/scripts.elena.github.io",
"path": "_posts/django/migrate_auth-replace/script.py",
"copies": "1",
"size": "5184",
"license": "mit",
"hash": 306988663734331200,
"line_mean": 42.2,
"line_max": 107,
"alpha_frac": 0.506558642,
"autogenerated": false,
"ratio": 3.848552338530067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4855110980530067,
"avg_score": null,
"num_lines": null
} |
"""A quiz that manages questions."""
import random
from itertools import groupby
from .questions.base import Question, WrongAnswer
from .questions import *
def all_questions():
"""Return a list of all question classes."""
# Relies on every concrete question being a *direct* subclass of Question.
return list(Question.__subclasses__())
class Quiz(object):
"""Manage a session of questions and answers.
The quiz starts off with one question, which is stored
as current_question. It will move on to the next Question
if the correct answer is supplied, or if a request is made
to move on to the next question.
When all questions have been asked, it loops around and starts
over again.
The quiz keeps track of questions answered in a list of tuples,
where each tuple contains the question, an answer supplied, and
a boolean indicating if the answer was correct. For example:
[(question, 'Answer text', False), (question, 'Answer Text', True)]
"""
def __init__(self):
"""Initialise the quiz."""
self.questions_asked = []
self.all_questions = all_questions()
self.prepare_questions()
def prepare_questions(self):
"""Prepare the questions."""
# Reshuffle and restart the iterator; also instantiates the first question.
random.shuffle(self.all_questions)
self.question_iterator = iter(self.all_questions)
self.current_question = next(self.question_iterator)()
def next(self):
"""Move on to the next question and return it."""
try:
self.current_question = next(self.question_iterator)()
except StopIteration:
# Exhausted the deck: reshuffle and start over (see class docstring).
self.prepare_questions()
return self.current_question
def supply_answer(self, question, answer):
"""Check the answer and move on to the next question if it's correct.
Raises a WrongAnswer exception if the answer is incorrect.
"""
correct_answer = False
try:
correct_answer = question.check_answer(answer)
finally:
# Record the attempt and advance even when check_answer raises
# (the WrongAnswer exception still propagates to the caller).
self.questions_asked.append(
(self.current_question, answer, correct_answer)
)
self.next()
return correct_answer
def last_answer_was_correct(self):
"""Check if the last answer was correct."""
if self.questions_asked:
return self.questions_asked[-1][-1]
def get_results(self):
"""Get the score and a log of questions and answers."""
# NOTE(review): groupby only groups *consecutive* entries; this assumes
# attempts at the same question are adjacent in questions_asked. If a
# question is re-asked on a later cycle, the dict comprehension keeps
# only the last group for that question text -- verify intended.
questions_answered = {
question.get_question_text(): [i[1:]for i in answer]
for question, answer in groupby(self.questions_asked, key=lambda k: k[0])
}
number_of_questions = len(questions_answered)
# Scoring: a question answered correctly on the first try earns 1 point,
# and each additional attempt halves the credit (2 ** (1 - attempts)).
# answer[-1][-1] is the correctness flag of the final attempt.
number_correct = sum(
2 ** (1 - len(answer)) for answer in questions_answered.values()
if answer[-1][-1]
)
summary = "Total score: {} of {}".format(number_correct, number_of_questions)
results = ""
for question, answers in questions_answered.items():
results += '---\n\nQuestion: {}...\n\n'.format(question.split('\n', 1)[0])
for answer in answers:
results += 'Answer: {}\n'.format(answer[0])
results += 'Correct: {}\n\n'.format(bool(answer[1]))
return "{}\n\n{}".format(summary, results)
| {
"repo_name": "samuelfekete/Pythonometer",
"path": "pythonometer/quiz.py",
"copies": "1",
"size": "3228",
"license": "mit",
"hash": -2059579469150424300,
"line_mean": 34.4725274725,
"line_max": 86,
"alpha_frac": 0.615551425,
"autogenerated": false,
"ratio": 4.281167108753316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5396718533753315,
"avg_score": null,
"num_lines": null
} |
# Scratch examples: list.append mutates the list in place and returns None,
# which is why printing the append call itself (see comment below) is a mistake.
ar= [1,2,3]
ar.append(230)
#print(ar) #do not print ar.append(230) directly, otherwise it will return None
b=[]
b.append([1])
#print(b)
def triangle(n):
    """Return the first n rows of Pascal's triangle as a list of lists.

    triangle(0) -> []
    triangle(3) -> [[1], [1, 1], [1, 2, 1]]

    The original implementation called triangle(k-1) recursively for every
    interior cell, recomputing all previous rows each time (exponential
    running time). This version builds each row from the previous one, for
    O(n**2) total work, while returning exactly the same structure.
    """
    result = []
    for k in range(1, n + 1):
        if k == 1:
            row = [1]
        else:
            prev = result[-1]
            # interior cells are the pairwise sums of the previous row,
            # flanked by the two boundary 1s
            row = [1] + [prev[j - 1] + prev[j] for j in range(1, k - 1)] + [1]
        result.append(row)
    return result
# Smoke checks -- the expected output of each call is shown in the ">>>"
# comment that follows it.
print (triangle(0))
#>>> []
print (triangle(1))
#>>> [[1]]
print (triangle(2))
#>> [[1], [1, 1]]
print (triangle(3))
#>>> [[1], [1, 1], [1, 2, 1]]
print (triangle(6))
#>>> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1], [1, 5, 10, 10, 5, 1]]
| {
"repo_name": "coodoing/udacity-searchengine",
"path": "course6_Star2.py",
"copies": "1",
"size": "1364",
"license": "apache-2.0",
"hash": 1973946464561018400,
"line_mean": 23.3571428571,
"line_max": 82,
"alpha_frac": 0.4428152493,
"autogenerated": false,
"ratio": 3.0514541387024607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8713806913224871,
"avg_score": 0.05609249495551784,
"num_lines": 56
} |
# AR5 Run -- January 2016
import glob, os, itertools
from downscale import DownscaleAR5
# Downscale every (model, variable) combination found under input_path.
input_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/snap_prepped_data'
models = [d for d in os.listdir(input_path) if os.path.isdir(os.path.join(input_path, d))]
variables = ['hur', 'tas', 'clt']
combinations = itertools.product([input_path], models, variables)

# static args setup
clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
base_path = '/Data/malindgren/downscale_outputs/AR5'
ncores = 16

for input_path, model, variable in combinations:
    nc_list = glob.glob(os.path.join(input_path, model, variable, '*.nc'))
    # unpack to historical and modeled (exactly one historical file expected)
    historical, = [nc for nc in nc_list if '_historical' in nc]
    modeled = [nc for nc in nc_list if '_historical' not in nc]
    historical_modeled = zip(itertools.repeat(historical, len(modeled)), modeled)

    # run the historical series first
    args = {}
    if variable == 'hur':
        def clamp_vals(x):
            ''' clamp the values following the relative humidity downscaling '''
            x[(x > 100) & (x < 500)] = 95
            return x
        args.update(ar5_historical=historical, base_path=base_path, clim_path=clim_path,
                    template_raster_fn=template_raster_fn, ncores=ncores,
                    post_downscale_function=clamp_vals)
    else:
        args.update(ar5_historical=historical, base_path=base_path, clim_path=clim_path,
                    template_raster_fn=template_raster_fn, ncores=ncores)
    # run it
    print(historical)
    down = DownscaleAR5.DownscaleAR5(**args)
    output = down.downscale_ar5_ts()

    # now loop through the historical_modeled groupings
    for historical, modeled in historical_modeled:
        print(modeled)
        args = {}
        if variable == 'hur':
            def clamp_vals(x):
                ''' clamp the values following the relative humidity downscaling '''
                x[(x > 100) & (x < 500)] = 95
                return x
            # BUG FIX: the original passed ar5_modeled=ar5_modeled, but no
            # `ar5_modeled` name exists -- the loop variable is `modeled`,
            # so this branch raised NameError on first use.
            args.update(ar5_modeled=modeled, ar5_historical=historical, base_path=base_path,
                        clim_path=clim_path, template_raster_fn=template_raster_fn,
                        ncores=ncores, post_downscale_function=clamp_vals)
        else:
            args.update(ar5_modeled=modeled, ar5_historical=historical, base_path=base_path,
                        clim_path=clim_path, template_raster_fn=template_raster_fn,
                        ncores=ncores)
        # run it
        down = DownscaleAR5.DownscaleAR5(**args)
        output = down.downscale_ar5_ts()
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# # move the prepped clouds data into the directory with the hur and tas
# import os, glob, shutil
# input_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped'
# output_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
# # get the model names in the directory
# models = [ d for d in os.listdir(input_path) if os.path.isdir(os.path.join(input_path, d)) ]
# variable = 'clt'
# for model in models:
# files = glob.glob( os.path.join( input_path, model, variable, '*.nc' ) )
# if not os.path.exists( os.path.join( output_path, model, variable ) ):
# os.makedirs( os.path.join( output_path, model, variable ) )
# _ = [ shutil.copy( fn, os.path.join( output_path, model, variable ) ) for fn in files ]
| {
"repo_name": "ua-snap/downscale",
"path": "snap_scripts/epscor_sc/older_epscor_sc_scripts_archive/run_ar5_snap.py",
"copies": "1",
"size": "3439",
"license": "mit",
"hash": -7968540522036738000,
"line_mean": 33.39,
"line_max": 200,
"alpha_frac": 0.6876999128,
"autogenerated": false,
"ratio": 2.805057096247961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8812407732184616,
"avg_score": 0.03606985537266892,
"num_lines": 100
} |
"""Arabic module
Features:
=========
* Arabic letters classification
* Text tokenization
* Strip Harakat ( all, except Shadda, tatweel, last_haraka)
* Sperate and join Letters and Harakat
* Reduce tashkeel
* Mesure tashkeel similarity ( Harakats, fully or partially vocalized, similarity with a template)
* Letters normalization ( Ligatures and Hamza)
Includes code written by 'Arabtechies', 'Arabeyes', 'Taha Zerrouki'.
.. todo::
Remove, rewrite, and/or refactor this due to GPL.
"""
import re
from cltk.phonology.arb.utils.pyarabic import stack
__author__ = ["Taha Zerrouki taha.zerrouki@gmail.com"]
__license__ = "GPL"
COMMA = "\u060C"
SEMICOLON = "\u061B"
QUESTION = "\u061F"
HAMZA = "\u0621"
ALEF_MADDA = "\u0622"
ALEF_HAMZA_ABOVE = "\u0623"
WAW_HAMZA = "\u0624"
ALEF_HAMZA_BELOW = "\u0625"
YEH_HAMZA = "\u0626"
ALEF = "\u0627"
BEH = "\u0628"
TEH_MARBUTA = "\u0629"
TEH = "\u062a"
THEH = "\u062b"
JEEM = "\u062c"
HAH = "\u062d"
KHAH = "\u062e"
DAL = "\u062f"
THAL = "\u0630"
REH = "\u0631"
ZAIN = "\u0632"
SEEN = "\u0633"
SHEEN = "\u0634"
SAD = "\u0635"
DAD = "\u0636"
TAH = "\u0637"
ZAH = "\u0638"
AIN = "\u0639"
GHAIN = "\u063a"
TATWEEL = "\u0640"
FEH = "\u0641"
QAF = "\u0642"
KAF = "\u0643"
LAM = "\u0644"
MEEM = "\u0645"
NOON = "\u0646"
HEH = "\u0647"
WAW = "\u0648"
ALEF_MAKSURA = "\u0649"
YEH = "\u064a"
MADDA_ABOVE = "\u0653"
HAMZA_ABOVE = "\u0654"
HAMZA_BELOW = "\u0655"
ZERO = "\u0660"
ONE = "\u0661"
TWO = "\u0662"
THREE = "\u0663"
FOUR = "\u0664"
FIVE = "\u0665"
SIX = "\u0666"
SEVEN = "\u0667"
EIGHT = "\u0668"
NINE = "\u0669"
PERCENT = "\u066a"
DECIMAL = "\u066b"
THOUSANDS = "\u066c"
STAR = "\u066d"
MINI_ALEF = "\u0670"
ALEF_WASLA = "\u0671"
FULL_STOP = "\u06d4"
BYTE_ORDER_MARK = "\ufeff"
# Diacritics
FATHATAN = "\u064b"
DAMMATAN = "\u064c"
KASRATAN = "\u064d"
FATHA = "\u064e"
DAMMA = "\u064f"
KASRA = "\u0650"
SHADDA = "\u0651"
SUKUN = "\u0652"
# Small Letters
SMALL_ALEF = "\u0670"
SMALL_WAW = "\u06E5"
SMALL_YEH = "\u06E6"
# Ligatures
LAM_ALEF = "\ufefb"
LAM_ALEF_HAMZA_ABOVE = "\ufef7"
LAM_ALEF_HAMZA_BELOW = "\ufef9"
LAM_ALEF_MADDA_ABOVE = "\ufef5"
SIMPLE_LAM_ALEF = "\u0644\u0627"
SIMPLE_LAM_ALEF_HAMZA_ABOVE = "\u0644\u0623"
SIMPLE_LAM_ALEF_HAMZA_BELOW = "\u0644\u0625"
SIMPLE_LAM_ALEF_MADDA_ABOVE = "\u0644\u0622"
# groups
LETTERS = (
ALEF,
BEH,
TEH,
TEH_MARBUTA,
THEH,
JEEM,
HAH,
KHAH,
DAL,
THAL,
REH,
ZAIN,
SEEN,
SHEEN,
SAD,
DAD,
TAH,
ZAH,
AIN,
GHAIN,
FEH,
QAF,
KAF,
LAM,
MEEM,
NOON,
HEH,
WAW,
YEH,
HAMZA,
ALEF_MADDA,
ALEF_HAMZA_ABOVE,
WAW_HAMZA,
ALEF_HAMZA_BELOW,
YEH_HAMZA,
)
TASHKEEL = (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN, SHADDA)
HARAKAT = (FATHATAN, DAMMATAN, KASRATAN, FATHA, DAMMA, KASRA, SUKUN)
SHORTHARAKAT = (FATHA, DAMMA, KASRA, SUKUN)
TANWIN = (FATHATAN, DAMMATAN, KASRATAN)
NOT_DEF_HARAKA = TATWEEL
LIGUATURES = (
LAM_ALEF,
LAM_ALEF_HAMZA_ABOVE,
LAM_ALEF_HAMZA_BELOW,
LAM_ALEF_MADDA_ABOVE,
)
HAMZAT = (
HAMZA,
WAW_HAMZA,
YEH_HAMZA,
HAMZA_ABOVE,
HAMZA_BELOW,
ALEF_HAMZA_BELOW,
ALEF_HAMZA_ABOVE,
)
ALEFAT = (
ALEF,
ALEF_MADDA,
ALEF_HAMZA_ABOVE,
ALEF_HAMZA_BELOW,
ALEF_WASLA,
ALEF_MAKSURA,
SMALL_ALEF,
)
WEAK = (ALEF, WAW, YEH, ALEF_MAKSURA)
YEHLIKE = (YEH, YEH_HAMZA, ALEF_MAKSURA, SMALL_YEH)
WAWLIKE = (WAW, WAW_HAMZA, SMALL_WAW)
TEHLIKE = (TEH, TEH_MARBUTA)
SMALL = (SMALL_ALEF, SMALL_WAW, SMALL_YEH)
MOON = (
HAMZA,
ALEF_MADDA,
ALEF_HAMZA_ABOVE,
ALEF_HAMZA_BELOW,
ALEF,
BEH,
JEEM,
HAH,
KHAH,
AIN,
GHAIN,
FEH,
QAF,
KAF,
MEEM,
HEH,
WAW,
YEH,
)
SUN = (TEH, THEH, DAL, THAL, REH, ZAIN, SEEN, SHEEN, SAD, DAD, TAH, ZAH, LAM, NOON)
ALPHABETIC_ORDER = {
ALEF: 1,
BEH: 2,
TEH: 3,
TEH_MARBUTA: 3,
THEH: 4,
JEEM: 5,
HAH: 6,
KHAH: 7,
DAL: 8,
THAL: 9,
REH: 10,
ZAIN: 11,
SEEN: 12,
SHEEN: 13,
SAD: 14,
DAD: 15,
TAH: 16,
ZAH: 17,
AIN: 18,
GHAIN: 19,
FEH: 20,
QAF: 21,
KAF: 22,
LAM: 23,
MEEM: 24,
NOON: 25,
HEH: 26,
WAW: 27,
YEH: 28,
HAMZA: 29,
ALEF_MADDA: 29,
ALEF_HAMZA_ABOVE: 29,
WAW_HAMZA: 29,
ALEF_HAMZA_BELOW: 29,
YEH_HAMZA: 29,
}
NAMES = {
ALEF: "ألف",
BEH: "باء",
TEH: "تاء",
TEH_MARBUTA: "تاء مربوطة",
THEH: "ثاء",
JEEM: "جيم",
HAH: "حاء",
KHAH: "خاء",
DAL: "دال",
THAL: "ذال",
REH: "راء",
ZAIN: "زاي",
SEEN: "سين",
SHEEN: "شين",
SAD: "صاد",
DAD: "ضاد",
TAH: "طاء",
ZAH: "ظاء",
AIN: "عين",
GHAIN: "غين",
FEH: "فاء",
QAF: "قاف",
KAF: "كاف",
LAM: "لام",
MEEM: "ميم",
NOON: "نون",
HEH: "هاء",
WAW: "واو",
YEH: "ياء",
HAMZA: "همزة",
TATWEEL: "تطويل",
ALEF_MADDA: "ألف ممدودة",
ALEF_MAKSURA: "ألف مقصورة",
ALEF_HAMZA_ABOVE: "همزة على الألف",
WAW_HAMZA: "همزة على الواو",
ALEF_HAMZA_BELOW: "همزة تحت الألف",
YEH_HAMZA: "همزة على الياء",
FATHATAN: "فتحتان",
DAMMATAN: "ضمتان",
KASRATAN: "كسرتان",
FATHA: "فتحة",
DAMMA: "ضمة",
KASRA: "كسرة",
SHADDA: "شدة",
SUKUN: "سكون",
}
# regular expretion
HARAKAT_PATTERN = re.compile(u"[" + u"".join(HARAKAT) + u"]", re.UNICODE)
# ~ """ pattern to strip Harakat"""
LASTHARAKA_PATTERN = re.compile(
u"[%s]$|[%s]" % (u"".join(HARAKAT), u"".join(TANWIN)), re.UNICODE
)
# ~ """ Pattern to strip only the last haraka """
SHORTHARAKAT_PATTERN = re.compile(u"[" + u"".join(SHORTHARAKAT) + u"]", re.UNICODE)
# ~ Pattern to lookup Short Harakat(Fatha, Damma, Kasra, sukun, tanwin),
# but not shadda
TASHKEEL_PATTERN = re.compile(u"[" + u"".join(TASHKEEL) + u"]", re.UNICODE)
# ~ """ Harakat and shadda pattern """
HAMZAT_PATTERN = re.compile(u"[" + u"".join(HAMZAT) + u"]", re.UNICODE)
# ~ """ all hamzat pattern"""
ALEFAT_PATTERN = re.compile(u"[" + u"".join(ALEFAT) + u"]", re.UNICODE)
# ~ """ all alef like letters """
LIGUATURES_PATTERN = re.compile(u"[" + u"".join(LIGUATURES) + u"]", re.UNICODE)
# ~ """ all liguatures pattern """
TOKEN_PATTERN = re.compile(u"([\w%s]+)" % u"".join(TASHKEEL), re.UNICODE)
TOKEN_REPLACE = re.compile("\t|\r|\f|\v| ")
# ~ """ pattern to tokenize a text"""
################################################
# { is letter functions
################################################
def is_sukun(archar):
    """Return True when ``archar`` is the Arabic Sukun mark.

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Sukun
    @rtype: Boolean
    """
    return SUKUN == archar
def is_shadda(archar):
    """Return True when ``archar`` is the Arabic Shadda mark.

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Shadda
    @rtype: Boolean
    """
    return SHADDA == archar
def is_tatweel(archar):
    """Return True when ``archar`` is the Arabic Tatweel letter modifier.

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Tatweel
    @rtype: Boolean
    """
    return TATWEEL == archar
def is_tanwin(archar):
    """Return True when ``archar`` is a Tanwin mark
    (FATHATAN, DAMMATAN, or KASRATAN).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Tanwin mark
    @rtype: Boolean
    """
    return archar in TANWIN
def is_tashkeel(archar):
    """Return True when ``archar`` is any Tashkeel mark:
    the short harakat (FATHA, DAMMA, KASRA, SUKUN), SHADDA,
    or the tanwin marks (FATHATAN, DAMMATAN, KASRATAN).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Tashkeel mark
    @rtype: Boolean
    """
    return archar in TASHKEEL
def is_haraka(archar):
    """Return True when ``archar`` is a Haraka mark
    (FATHA, DAMMA, KASRA, SUKUN, or one of the tanwin marks).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Haraka
    @rtype: Boolean
    """
    return archar in HARAKAT
def is_shortharaka(archar):
    """Return True when ``archar`` is a short Haraka mark
    (FATHA, DAMMA, KASRA, or SUKUN — tanwin and shadda excluded).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a short Haraka
    @rtype: Boolean
    """
    return archar in SHORTHARAKAT
def is_ligature(archar):
    """Return True when ``archar`` is an Arabic LamAlef ligature
    (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_HAMZA_BELOW,
    LAM_ALEF_MADDA_ABOVE).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a ligature
    @rtype: Boolean
    """
    return archar in LIGUATURES
def is_hamza(archar):
    """Return True when ``archar`` is any Hamza form
    (HAMZA, WAW_HAMZA, YEH_HAMZA, HAMZA_ABOVE, HAMZA_BELOW,
    ALEF_HAMZA_BELOW, ALEF_HAMZA_ABOVE).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Hamza form
    @rtype: Boolean
    """
    return archar in HAMZAT
def is_alef(archar):
    """Return True when ``archar`` is any Alef form
    (ALEF, ALEF_MADDA, ALEF_HAMZA_ABOVE, ALEF_HAMZA_BELOW,
    ALEF_WASLA, ALEF_MAKSURA, SMALL_ALEF).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is an Alef form
    @rtype: Boolean
    """
    return archar in ALEFAT
def is_yehlike(archar):
    """Return True when ``archar`` is a Yeh-like form
    (YEH, YEH_HAMZA, ALEF_MAKSURA, SMALL_YEH).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is Yeh-like
    @rtype: Boolean
    """
    return archar in YEHLIKE
def is_wawlike(archar):
    """Return True when ``archar`` is a Waw-like form
    (WAW, WAW_HAMZA, SMALL_WAW).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is Waw-like
    @rtype: Boolean
    """
    return archar in WAWLIKE
def is_teh(archar):
    """Return True when ``archar`` is a Teh form (TEH, TEH_MARBUTA).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Teh form
    @rtype: Boolean
    """
    return archar in TEHLIKE
def is_small(archar):
    """Return True when ``archar`` is a small letter
    (SMALL_ALEF, SMALL_WAW, SMALL_YEH).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a small letter
    @rtype: Boolean
    """
    return archar in SMALL
def is_weak(archar):
    """Return True when ``archar`` is a weak letter
    (ALEF, WAW, YEH, ALEF_MAKSURA).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a weak letter
    @rtype: Boolean
    """
    return archar in WEAK
def is_moon(archar):
    """Return True when ``archar`` is a Moon letter
    (a letter that does not assimilate the preceding definite article).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Moon letter
    @rtype: Boolean
    """
    return archar in MOON
def is_sun(archar):
    """Return True when ``archar`` is a Sun letter
    (a letter that assimilates the lam of the definite article).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True if the character is a Sun letter
    @rtype: Boolean
    """
    return archar in SUN
#####################################
# { general letter functions
#####################################
def order(archar):
    """Return the alphabetic order of an Arabic letter, 1 through 29.

    Alef is 1, Yeh is 28, Hamza is 29; Teh Marbuta shares Teh's
    order, 3. Unknown characters yield 0.

    @param archar: arabic unicode char
    @type archar: unicode
    @return: alphabetic order (0 when unknown)
    @rtype: integer
    """
    return ALPHABETIC_ORDER.get(archar, 0)
def name(archar):
    """Return the Arabic name of a letter, in Arabic.

    Unknown characters yield the empty string.

    @param archar: arabic unicode char
    @type archar: unicode
    @return: arabic letter name (empty when unknown)
    @rtype: unicode
    """
    return NAMES.get(archar, u"")
def arabicrange():
    """Return the list of Arabic characters from U+0600 up to U+0652.

    @return: list of arabic characters.
    @rtype: list of unicode
    """
    # chr() always exists on Python 3 and never raises ValueError for
    # code points in this range, so the old unichr()/NameError/ValueError
    # fallback dance (a Python 2 relic) is unnecessary.
    return [chr(code) for code in range(0x0600, 0x0653)]
#####################################
# { Has letter functions
#####################################
def has_shadda(word):
    """Checks if the arabic word contains a shadda mark.

    @param word: arabic word
    @type word: unicode
    @return: True if a shadda exists in the word
    @rtype: Boolean
    """
    # SHADDA is a single literal character with no regex metacharacters,
    # so plain substring membership is equivalent to (and faster than)
    # the previous re.search() call.
    return SHADDA in word
#####################################
# { word and text functions
#####################################
def is_vocalized(word):
    """Checks if the arabic word is vocalized (carries tashkeel marks).

    The word must not contain spaces or punctuation.
    @param word: arabic word
    @type word: unicode
    @return: True if the word carries at least one tashkeel mark
    @rtype: Boolean
    """
    if word.isalpha():
        # str.isalpha() is False for any string containing combining
        # diacritics, so a purely alphabetic word has no tashkeel
        return False
    for char in word:
        if is_tashkeel(char):
            return True
    # NOTE: for/else — only reached when the loop found no tashkeel mark
    else:
        return False
def is_vocalizedtext(text):
    """Return True when the Arabic text contains any haraka mark.

    The text may contain several words and spaces.
    @param text: arabic text
    @type text: unicode
    @return: True if the text is at least partially vocalized
    @rtype: Boolean
    """
    return HARAKAT_PATTERN.search(text) is not None
def is_arabicstring(text):
    """Checks that the text uses only standard Arabic block characters.

    The string may also contain spaces, digits and punctuation, but no
    extended Arabic characters.
    @param text: input text
    @type text: unicode
    @return: True if every character is acceptable
    @rtype: Boolean
    """
    non_arabic = u"([^\u0600-\u0652%s%s%s\\s\\d])" % (
        LAM_ALEF,
        LAM_ALEF_HAMZA_ABOVE,
        LAM_ALEF_MADDA_ABOVE,
    )
    return re.search(non_arabic, text) is None
def is_arabicrange(text):
    """Checks that every character lies in an Arabic Unicode block
    (standard, presentation forms A/B, or supplement).

    @param text: input text
    @type text: unicode
    @return: True if all characters are in an Arabic block
    @rtype: Boolean
    """
    outside_blocks = u"([^\u0600-\u06ff\ufb50-\ufdff\ufe70-\ufeff\u0750-\u077f])"
    return re.search(outside_blocks, text) is None
def is_arabicword(word):
    """Checks for a valid Arabic word.

    An Arabic word contains no spaces, digits or punctuation, and to
    avoid common spelling errors TEH_MARBUTA / ALEF_MAKSURA may only
    appear at the end.
    @param word: input word
    @type word: unicode
    @return: True if the word is a valid Arabic word
    @rtype: Boolean
    """
    # guard clauses: reject each invalid shape in turn
    if len(word) == 0:
        return False
    if re.search(
        u"([^\u0600-\u0652%s%s%s])"
        % (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE),
        word,
    ):
        # contains a character outside the standard block / ligatures
        return False
    if is_haraka(word[0]) or word[0] in (WAW_HAMZA, YEH_HAMZA):
        # cannot start with a haraka or a hamza carrier waw/yeh
        return False
    if re.match(u"^(.)*[%s](.)+$" % ALEF_MAKSURA, word):
        # ALEF_MAKSURA must be the final letter
        return False
    if re.match(
        u"^(.)*[%s]([^%s%s%s])(.)+$" % (TEH_MARBUTA, DAMMA, KASRA, FATHA), word
    ):
        # TEH_MARBUTA may only be followed by a short haraka at the end
        return False
    return True
#####################################
# {Char functions
#####################################
def first_char(word):
    """Return the first character of the word.

    @param word: given word
    @type word: unicode
    @return: the first character
    @rtype: unicode char
    """
    return word[0]
def second_char(word):
    """Return the second character of the word (empty string when the
    word has fewer than two characters).

    @param word: given word
    @type word: unicode
    @return: the second character
    @rtype: unicode char
    """
    return word[1:2]
def last_char(word):
    """Return the last character of the word.

    Example: in "zerrouki", 'i' is the last character.
    @param word: given word
    @type word: unicode
    @return: the last character (empty string for an empty word)
    @rtype: unicode char
    """
    return word[-1:]
def secondlast_char(word):
    """Return the second-to-last character of the word.

    Example: in "zerrouki", 'k' is the second-to-last character.
    @param word: given word
    @type word: unicode
    @return: the second-to-last character (empty when too short)
    @rtype: unicode char
    """
    return word[-2:-1]
#####################################
# {Strip functions
#####################################
def strip_harakat(text):
    """Strip Harakat from an Arabic word, keeping Shadda.

    The stripped marks are: FATHA, DAMMA, KASRA, SUKUN,
    FATHATAN, DAMMATAN, KASRATAN.
    @param text: arabic text.
    @type text: unicode.
    @return: the stripped text.
    @rtype: unicode.
    """
    # empty / unvocalized input is returned untouched
    if text and is_vocalized(text):
        for mark in HARAKAT:
            text = text.replace(mark, "")
    return text
def strip_lastharaka(text):
    """Strip the last Haraka from an Arabic word, keeping Shadda.

    The stripped marks are: FATHA, DAMMA, KASRA, SUKUN,
    FATHATAN, DAMMATAN, KASRATAN.
    @param text: arabic text.
    @type text: unicode.
    @return: the stripped text.
    @rtype: unicode.
    """
    if text and is_vocalized(text):
        return re.sub(LASTHARAKA_PATTERN, u"", text)
    return text
def strip_tashkeel(text):
    """Strip all vowel marks from a text, including Shadda.

    The stripped marks are: FATHA, DAMMA, KASRA, SUKUN, SHADDA,
    FATHATAN, DAMMATAN, KASRATAN.
    @param text: arabic text.
    @type text: unicode.
    @return: the stripped text.
    @rtype: unicode.
    """
    # empty / unvocalized input is returned untouched
    if text and is_vocalized(text):
        for mark in TASHKEEL:
            text = text.replace(mark, "")
    return text
def strip_tatweel(text):
    """Strip the tatweel (kashida) character from a text.

    @param text: arabic text.
    @type text: unicode.
    @return: the stripped text.
    @rtype: unicode.
    """
    return text.replace(TATWEEL, u"")
def strip_shadda(text):
    """Strip the Shadda mark from a text.

    @param text: arabic text.
    @type text: unicode.
    @return: the stripped text.
    @rtype: unicode.
    """
    return text.replace(SHADDA, u"")
def normalize_ligature(text):
    """Normalize LamAlef ligatures into the two letters LAM and ALEF.

    Some systems encode the lamAlef ligature as a single code point;
    this expands LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_HAMZA_BELOW
    and LAM_ALEF_MADDA_ABOVE into a LAM + ALEF pair.
    @param text: arabic text.
    @type text: unicode.
    @return: the converted text.
    @rtype: unicode.
    """
    if not text:
        return text
    return LIGUATURES_PATTERN.sub(LAM + ALEF, text)
def normalize_hamza(word):
    """Standardize the Hamza forms into one bare hamza and
    replace Madda by hamza and alef.

    @param word: arabic text.
    @type word: unicode.
    @return: the converted text.
    @rtype: unicode.
    """
    # special-case a word-initial ALEF_MADDA: depending on the following
    # characters it expands either to HAMZA+ALEF or to a doubled HAMZA
    if word.startswith(ALEF_MADDA):
        if (
            len(word) >= 3
            and (word[1] not in HARAKAT)
            and (word[2] == SHADDA or len(word) == 3)
        ):
            word = HAMZA + ALEF + word[1:]
        else:
            word = HAMZA + HAMZA + word[1:]
    # any remaining (non-initial) ALEF_MADDA becomes a doubled hamza
    word = word.replace(ALEF_MADDA, HAMZA + HAMZA)
    # finally collapse every hamza carrier onto the bare HAMZA
    word = HAMZAT_PATTERN.sub(HAMZA, word)
    return word
def separate(word, extract_shadda=False):
    """
    Separate the letters from the vowels in an Arabic word.
    If a letter has no haraka, the not-defined haraka (TATWEEL) is
    attributed to it.
    @param word: the input word
    @type word: unicode
    @param extract_shadda: extract shadda as separate text
    @type extract_shadda: Boolean
    @return: (letters, vowels) or, with extract_shadda,
        (letters, vowels, shadda_places)
    @rtype: couple/triple of unicode
    """
    stack1 = stack.Stack(word)
    # the word is reversed in the stack so pop() yields characters
    # in original order
    stack1.items.reverse()
    letters = stack.Stack()
    marks = stack.Stack()
    vowels = HARAKAT
    last1 = stack1.pop()
    # the first element must be a letter: an Arabic word cannot start
    # with a haraka, so skip any leading vowel marks
    while last1 in vowels:
        last1 = stack1.pop()
    while last1 != None:
        if last1 in vowels:
            # two harakat cannot sit side by side: the newest one
            # replaces the placeholder pushed for the current letter
            marks.pop()
            marks.push(last1)
        elif last1 == SHADDA:
            # the shadda is treated as a letter: the previous letter
            # gets a SUKUN mark and the shadda itself gets the
            # not-defined mark
            marks.pop()
            marks.push(SUKUN)
            marks.push(NOT_DEF_HARAKA)
            letters.push(SHADDA)
        else:
            # a plain letter: push it with the not-defined placeholder
            marks.push(NOT_DEF_HARAKA)
            letters.push(last1)
        last1 = stack1.pop()
    if extract_shadda:
        # build a parallel string marking where each shadda sits
        wordletters = u"".join(letters.items)
        # replace every non-shadda with TATWEEL ...
        shaddaplaces = re.sub(u"[^%s]" % SHADDA, TATWEEL, wordletters)
        # ... then collapse TATWEEL+SHADDA pairs onto the carrier slot
        shaddaplaces = re.sub(u"%s%s" % (TATWEEL, SHADDA), SHADDA, shaddaplaces)
        # and drop the shadda from the letters themselves
        wordletters = strip_shadda(wordletters)
        return (wordletters, u"".join(marks.items), shaddaplaces)
    else:
        return (u"".join(letters.items), u"".join(marks.items))
def joint(letters, marks):
    """Join the letters with the marks (inverse of separate()).

    The lengths of letters and marks must be equal.
    @param letters: the word letters
    @type letters: unicode
    @param marks: the word marks
    @type marks: unicode
    @return: the rebuilt word, "" on length mismatch, False when the
        inputs could not be fully consumed
    @rtype: unicode
    """
    # The length of letters and marks must be equal
    if len(letters) != len(marks):
        return ""
    stack_letter = stack.Stack(letters)
    stack_letter.items.reverse()
    stack_mark = stack.Stack(marks)
    stack_mark.items.reverse()
    word_stack = stack.Stack()
    last_letter = stack_letter.pop()
    last_mark = stack_mark.pop()
    vowels = HARAKAT
    while last_letter != None and last_mark != None:
        if last_letter == SHADDA:
            # a shadda attaches directly to the previous letter: drop
            # the haraka that was emitted after it, if any
            top = word_stack.pop()
            if top not in vowels:
                word_stack.push(top)
            word_stack.push(last_letter)
            # TATWEEL is the "no haraka" placeholder and is not emitted
            if last_mark != NOT_DEF_HARAKA:
                word_stack.push(last_mark)
        else:
            word_stack.push(last_letter)
            if last_mark != NOT_DEF_HARAKA:
                word_stack.push(last_mark)
        last_letter = stack_letter.pop()
        last_mark = stack_mark.pop()
    if not (stack_letter.is_empty() and stack_mark.is_empty()):
        # one stack ran out before the other: inconsistent input
        return False
    else:
        return "".join(word_stack.items)
def vocalizedlike(word1, word2):
    """Return True when the two words share the same letters and
    compatible harakat. Each word may be fully or partially vocalized.

    @param word1: first word
    @type word1: unicode
    @param word2: second word
    @type word2: unicode
    @return: True if the two words have similar vocalization
    @rtype: Boolean
    """
    # vocalized_similarity() returns True on a match and a negative
    # error count on mismatch
    return vocalized_similarity(word1, word2) >= 0
# -------------------------
# Function def vaznlike(word1, wazn):
# -------------------------
def waznlike(word1, wazn):
    """Check whether word1 matches a wazn (pattern).

    The letters must be equal; the wazn uses FEH, AIN and LAM as
    generic root-slot letters. Both words may be fully or partially
    vocalized.
    @param word1: input word
    @type word1: unicode
    @param wazn: given word template وزن
    @type wazn: unicode
    @return: True if the word matches the pattern
    @rtype: Boolean
    """
    stack1 = stack.Stack(word1)
    stack2 = stack.Stack(wazn)
    root = stack.Stack()
    last1 = stack1.pop()
    last2 = stack2.pop()
    vowels = HARAKAT
    while last1 != None and last2 != None:
        if last1 == last2 and last2 not in (FEH, AIN, LAM):
            # identical non-slot characters: consume both
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 not in vowels and last2 in (FEH, AIN, LAM):
            # a consonant matching a root slot: collect it as a root letter
            root.push(last1)
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 in vowels and last2 not in vowels:
            # the word carries an extra haraka: skip it
            last1 = stack1.pop()
        elif last1 not in vowels and last2 in vowels:
            # the pattern carries an extra haraka: skip it
            last2 = stack2.pop()
        else:
            break
    # reverse the root letters (they were collected backwards)
    root.items.reverse()
    if not (stack1.is_empty() and stack2.is_empty()):
        return False
    else:
        return True
def shaddalike(partial, fully):
    """
    Check shadda compatibility between two spellings of a word.
    The first word is partially vocalized, the second is fully
    vocalized; if the partial form contains a shadda, it must appear
    at the same place in the full form.
    @param partial: the partially vocalized word
    @type partial: unicode
    @param fully: the fully vocalized word
    @type fully: unicode
    @return: True when the shadda positions are compatible
    @rtype: Boolean
    """
    # the input has no shadda: nothing to check
    if not has_shadda(partial):
        return True
    # the input has a shadda but the full form has none: mismatch
    elif not has_shadda(fully) and has_shadda(partial):
        return False
    # both carry a shadda: verify that it sits at the same position,
    # comparing the two words with harakat stripped
    partial = strip_harakat(partial)
    fully = strip_harakat(fully)
    pstack = stack.Stack(partial)
    vstack = stack.Stack(fully)
    plast = pstack.pop()
    vlast = vstack.pop()
    while plast != None and vlast != None:
        if plast == vlast:
            plast = pstack.pop()
            vlast = vstack.pop()
        elif plast == SHADDA and vlast != SHADDA:
            # partial has a shadda where the full form does not: mismatch
            break
        elif plast != SHADDA and vlast == SHADDA:
            # the full form may carry extra shaddas: skip them
            vlast = vstack.pop()
        else:
            break
    if not (pstack.is_empty() and vstack.is_empty()):
        return False
    else:
        return True
def reduce_tashkeel(text):
    """Reduce the Tashkeel by deleting marks that are evident from
    context.

    @param text: the input text, fully vocalized.
    @type text: unicode.
    @return: partially vocalized text.
    @rtype: unicode.
    """
    evident_cases = (
        # delete all fathat/sukun, except on waw and yeh
        u"(?<!(%s|%s))(%s|%s)" % (WAW, YEH, SUKUN, FATHA),
        # delete damma if followed by waw
        u"%s(?=%s)" % (DAMMA, WAW),
        # delete kasra if followed by yeh
        u"%s(?=%s)" % (KASRA, YEH),
        # delete fatha if followed by alef (yeh/waw maftouha before alef)
        u"%s(?=%s)" % (FATHA, ALEF),
        # delete fatha from a word-initial yeh or waw
        u"(?<=\\s(%s|%s))%s" % (WAW, YEH, FATHA),
        # delete kasra if preceded by hamza-below-alef
        u"(?<=%s)%s" % (ALEF_HAMZA_BELOW, KASRA),
    )
    result = text
    for pattern in evident_cases:
        result = re.sub(pattern, "", result)
    return result
def vocalized_similarity(word1, word2):
    """
    Compare two words letter by letter, tolerating missing harakat.
    The two words may be fully or partially vocalized.
    @param word1: first word
    @type word1: unicode
    @param word2: second word
    @type word2: unicode
    @return: True when the words are similar, otherwise the negative
        count of mismatching positions
    @rtype: Boolean / int
    """
    stack1 = stack.Stack(word1)
    stack2 = stack.Stack(word2)
    last1 = stack1.pop()
    last2 = stack2.pop()
    err_count = 0
    vowels = HARAKAT
    while last1 != None and last2 != None:
        if last1 == last2:
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 in vowels and last2 not in vowels:
            # word1 carries an extra haraka: skip it, no penalty
            last1 = stack1.pop()
        elif last1 not in vowels and last2 in vowels:
            # word2 carries an extra haraka: skip it, no penalty
            last2 = stack2.pop()
        else:
            # real mismatch: an unmatched shadda is skipped without
            # penalty, any other difference counts as one error
            if last1 == SHADDA:
                last1 = stack1.pop()
            elif last2 == SHADDA:
                last2 = stack2.pop()
            else:
                last1 = stack1.pop()
                last2 = stack2.pop()
                err_count += 1
    if err_count > 0:
        return -err_count
    else:
        return True
def tokenize(text=""):
    """
    Tokenize a text into words.
    @param text: the input text.
    @type text: unicode.
    @return: list of words.
    @rtype: list.
    """
    if text == "":
        return []
    # split on word/tashkeel boundaries, dropping empty fragments
    raw_tokens = [tok for tok in TOKEN_PATTERN.split(text) if tok]
    # strip inline whitespace (tabs, spaces, ...) but keep newlines
    cleaned = [TOKEN_REPLACE.sub("", tok) for tok in raw_tokens]
    # drop tokens that became empty after cleaning
    return [tok for tok in cleaned if tok]
if __name__ == "__main__":
    # Smoke-test demo: compare partially and fully vocalized word pairs.
    # ~WORDS = [u'الْدَرَاجَةُ', u'الدّرّاجة',
    # ~u'سّلّامْ', ]
    # ~for wrd in WORDS:
    # ~l, m, s = separate(wrd, True)
    # ~l = joint(l, s)
    # ~print u'\t'.join([wrd, l, m, s]).encode('utf8')
    # ~newword = joint(l, m)
    # ~assert (newword != wrd)
    print("like: ", vocalizedlike("مُتَوَهِّمًا", "متوهمًا"))
    print("sim: ", vocalized_similarity("ثمّ", "ثُمَّ"))
    print("like: ", vocalizedlike("ثمّ", "ثُمَّ"))
    print("sim: ", vocalized_similarity("ثم", "ثُمَّ"))
    print("like: ", vocalizedlike("ثم", "ثُمَّ"))
    print("sim: ", vocalized_similarity("مُتَوَهِّمًا", "متوهمًا"))
    print("sim: ", vocalized_similarity("مُتَوَهِّمًا", "متوهمًا"))
| {
"repo_name": "kylepjohnson/cltk",
"path": "src/cltk/phonology/arb/utils/pyarabic/araby.py",
"copies": "4",
"size": "29954",
"license": "mit",
"hash": -4748158924553462000,
"line_mean": 24.2677502139,
"line_max": 124,
"alpha_frac": 0.5890039949,
"autogenerated": false,
"ratio": 2.9127304999506953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037969052217857687,
"num_lines": 1169
} |
"""Arabic transliteration, Roman <-> Arabic Unicode. This implementation is based on the following resources:
1. http://languagelog.ldc.upenn.edu/myl/ldc/morph/buckwalter.html.
2. https://github.com/Alfanous-team/alfanous/blob/master/src/alfanous/Romanization.py
3. https://en.wikipedia.org/wiki/ArabTeX
"""
__author__ = ["Lakhdar Benzahia <lakhdar.benzahia@gmail.com>"]
__license__ = "MIT License. See LICENSE."
__reviewers__ = [
"Taha Zerrouki taha.zerrouki@gmail.com",
"Kyle P. Johnson <kyle@kyle-p-johnson.com>",
]
BUCKWALTER_TO_UNICODE = {
"'": "\u0621", # hamza-on-the-line
"|": "\u0622", # madda
">": "\u0623", # hamza-on-'alif
"&": "\u0624", # hamza-on-waaw
"<": "\u0625", # hamza-under-'alif
"}": "\u0626", # hamza-on-yaa'
"A": "\u0627", # bare 'alif
"b": "\u0628", # baa'
"p": "\u0629", # taa' marbuuTa
"t": "\u062A", # taa'
"v": "\u062B", # thaa'
"j": "\u062C", # jiim
"H": "\u062D", # Haa'
"x": "\u062E", # khaa'
"d": "\u062F", # daal
"*": "\u0630", # dhaal
"r": "\u0631", # raa'
"z": "\u0632", # zaay
"s": "\u0633", # siin
"$": "\u0634", # shiin
"S": "\u0635", # Saad
"D": "\u0636", # Daad
"T": "\u0637", # Taa'
"Z": "\u0638", # Zaa' (DHaa')
"E": "\u0639", # cayn
"g": "\u063A", # ghayn
"_": "\u0640", # taTwiil
"f": "\u0641", # faa'
"q": "\u0642", # qaaf
"k": "\u0643", # kaaf
"l": "\u0644", # laam
"m": "\u0645", # miim
"n": "\u0646", # nuun
"h": "\u0647", # haa'
"w": "\u0648", # waaw
"Y": "\u0649", # 'alif maqSuura
"y": "\u064A", # yaa'
"F": "\u064B", # fatHatayn
"N": "\u064C", # Dammatayn
"K": "\u064D", # kasratayn
"a": "\u064E", # fatHa
"u": "\u064F", # Damma
"i": "\u0650", # kasra
"~": "\u0651", # shaddah
"o": "\u0652", # sukuun
"`": "\u0670", # dagger 'alif
"{": "\u0671", # waSla
# extended here
"^": "\u0653", # Maddah
"#": "\u0654", # HamzaAbove
":": "\u06DC", # SmallHighSeen
"@": "\u06DF", # SmallHighRoundedZero
'"': "\u06E0", # SmallHighUprightRectangularZero
"[": "\u06E2", # SmallHighMeemIsolatedForm
";": "\u06E3", # SmallLowSeen
",": "\u06E5", # SmallWaw
".": "\u06E6", # SmallYa
"!": "\u06E8", # SmallHighNoon
"-": "\u06EA", # EmptyCentreLowStop
"+": "\u06EB", # EmptyCentreHighStop
"%": "\u06EC", # RoundedHighStopWithFilledCentre
"]": "\u06ED", #
}
ISO2332_TO_UNICODE = {
    # NOTE(review): the three hamza-carrier entries below all use the same
    # key "ˈ" (U+02C8), so the earlier duplicates are silently overwritten
    # at dict construction and only the hamza-on-yaa' mapping survives —
    # confirm the intended ISO 233-2 marks for hamza-on-'alif / on-waaw.
    "ˌ": "\u0621",  # hamza-on-the-line
    # "|": "\u0622", # madda
    "ˈ": "\u0623",  # hamza-on-'alif
    "ˈ": "\u0624",  # hamza-on-waaw
    # "<": "\u0625", # hamza-under-'alif
    "ˈ": "\u0626",  # hamza-on-yaa'
    "ʾ": "\u0627",  # bare 'alif
    "b": "\u0628",  # baa'
    "ẗ": "\u0629",  # taa' marbuuTa
    "t": "\u062A",  # taa'
    "ṯ": "\u062B",  # thaa'
    "ǧ": "\u062C",  # jiim
    "ḥ": "\u062D",  # Haa'
    "ẖ": "\u062E",  # khaa'
    "d": "\u062F",  # daal
    "ḏ": "\u0630",  # dhaal
    "r": "\u0631",  # raa'
    "z": "\u0632",  # zaay
    "s": "\u0633",  # siin
    "š": "\u0634",  # shiin
    "ṣ": "\u0635",  # Saad
    "ḍ": "\u0636",  # Daad
    "ṭ": "\u0637",  # Taa'
    "ẓ": "\u0638",  # Zaa' (DHaa')
    "ʿ": "\u0639",  # cayn
    "ġ": "\u063A",  # ghayn
    # "_": "\u0640", # taTwiil
    "f": "\u0641",  # faa'
    "q": "\u0642",  # qaaf
    "k": "\u0643",  # kaaf
    "l": "\u0644",  # laam
    "m": "\u0645",  # miim
    "n": "\u0646",  # nuun
    "h": "\u0647",  # haa'
    "w": "\u0648",  # waaw
    "ỳ": "\u0649",  # 'alif maqSuura
    "y": "\u064A",  # yaa'
    "á": "\u064B",  # fatHatayn
    "ú": "\u064C",  # Dammatayn
    "í": "\u064D",  # kasratayn
    "a": "\u064E",  # fatHa
    "u": "\u064F",  # Damma
    "i": "\u0650",  # kasra
    # "~": "\u0651", # shaddah
    "°": "\u0652",  # sukuun
    # "`": "\u0670", # dagger 'alif
    # "{": "\u0671", # waSla
    ##extended here
    # "^": "\u0653", # Maddah
    # "#": "\u0654", # HamzaAbove
    # ":": "\u06DC", # SmallHighSeen
    # "@": "\u06DF", # SmallHighRoundedZero
    # "\": "\u06E0", # SmallHighUprightRectangularZero
    # "[": "\u06E2", # SmallHighMeemIsolatedForm
    # ";": "\u06E3", # SmallLowSeen
    # ",": "\u06E5", # SmallWaw
    # ".": "\u06E6", # SmallYa
    # "!": "\u06E8", # SmallHighNoon
    # "-": "\u06EA", # EmptyCentreLowStop
    # "+": "\u06EB", # EmptyCentreHighStop
    # "%": "\u06EC", # RoundedHighStopWithFilledCentre
    # "]": "\u06ED" #
}
ARABTEX_TO_UNICODE = {
"'": "\u0621", # hamza-on-the-line
# "|": "\u0622", # madda
"a'": "\u0623", # hamza-on-'alif
"U'": "\u0624", # hamza-on-waaw
# "<": "\u0625", # hamza-under-'alif
"'y": "\u0626", # hamza-on-yaa'
"A": "\u0627", # bare 'alif
"b": "\u0628", # baa'
"T": "\u0629", # taa' marbuuTa
"t": "\u062A", # taa'
"_t": "\u062B", # thaa'
"j": "\u062C", # jiim
".h": "\u062D", # Haa'
"x": "\u062E", # khaa'
"d": "\u062F", # daal
"_d": "\u0630", # dhaal
"r": "\u0631", # raa'
"z": "\u0632", # zaay
"s": "\u0633", # siin
"^s": "\u0634", # shiin
".s": "\u0635", # Saad
".d": "\u0636", # Daad
".t": "\u0637", # Taa'
".z": "\u0638", # Zaa' (DHaa')
"`": "\u0639", # cayn
".g": "\u063A", # ghayn
# "_": "\u0640", # taTwiil # Missing
"f": "\u0641", # faa'
"q": "\u0642", # qaaf
"k": "\u0643", # kaaf
"l": "\u0644", # laam
"m": "\u0645", # miim
"n": "\u0646", # nuun
"h": "\u0647", # haa'
"w": "\u0648", # waaw
"I*": "\u0649", # 'alif maqSuura
"y": "\u064A", # yaa'
"aN": "\u064B", # fatHatayn
"uN": "\u064C", # Dammatayn
"iN": "\u064D", # kasratayn
"a": "\u064E", # fatHa
"u": "\u064F", # Damma
"i": "\u0650", # kasra
"xx": "\u0651", # shaddah
# "": "\u0652", # sukuun Missing
# "": "\u0670", # dagger 'alif Missing
# "": "\u0671", # waSla Missing
# extended here
# "": "\u0653", # Maddah Missing
# "": "\u0654", # HamzaAbove Missing
# "": "\u06DC", # SmallHighSeen Missing
# "": "\u06DF", # SmallHighRoundedZero Missing
# """: "\u06E0", # SmallHighUprightRectangularZero Missing
# "": "\u06E2", # SmallHighMeemIsolatedForm Missing
# "": "\u06E3", # SmallLowSeen Missing
# "": "\u06E5", # SmallWaw Missing
# "": "\u06E6", # SmallYa Missing
# "": "\u06E8", # SmallHighNoon Missing
# "": "\u06EA", # EmptyCentreLowStop Missing
# "": "\u06EB", # EmptyCentreHighStop Missing
# "": "\u06EC", # RoundedHighStopWithFilledCentre Missing
# "": "\u06ED" # Missing
}
ASMO449_TO_UNICODE = {
"A": "\u0621", # hamza-on-the-line
"B": "\u0622", # madda
"C": "\u0623", # hamza-on-'alif
"D": "\u0624", # hamza-on-waaw
"E": "\u0625", # hamza-under-'alif
"F": "\u0626", # hamza-on-yaa'
"G": "\u0627", # bare 'alif
"H": "\u0628", # baa'
"I": "\u0629", # taa' marbuuTa
"J": "\u062A", # taa'
"K": "\u062B", # thaa'
"L": "\u062C", # jiim
"M": "\u062D", # Haa'
"N": "\u062E", # khaa'
"O": "\u062F", # daal
"P": "\u0630", # dhaal
"Q": "\u0631", # raa'
"R": "\u0632", # zaay
"S": "\u0633", # siin
"T": "\u0634", # shiin
"U": "\u0635", # Saad
"V": "\u0636", # Daad
"W": "\u0637", # Taa'
"X": "\u0638", # Zaa' (DHaa')
"Y": "\u0639", # cayn
"Z": "\u063A", # ghayn
"0x60": "\u0640", # taTwiil
"a": "\u0641", # faa'
"b": "\u0642", # qaaf
"c": "\u0643", # kaaf
"d": "\u0644", # laam
"e": "\u0645", # miim
"f": "\u0646", # nuun
"g": "\u0647", # haa'
"h": "\u0648", # waaw
"i": "\u0649", # 'alif maqSuura
"j": "\u064A", # yaa'
"k": "\u064B", # fatHatayn
"l": "\u064C", # Dammatayn
"m": "\u064D", # kasratayn
"n": "\u064E", # fatHa
"o": "\u064F", # Damma
"p": "\u0650", # kasra
"q": "\u0651", # shaddah
"r": "\u0652", # sukuun
# "": "\u0670", # dagger 'alif missing
# "": "\u0671", # waSla missing
# extended here
# "": "\u0653", # Maddah missing
# "": "\u0654", # HamzaAbove missing
# "": "\u06DC", # SmallHighSeen missing
# "": "\u06DF", # SmallHighRoundedZero missing
# """: "\u06E0", # SmallHighUprightRectangularZero missing
# "": "\u06E2", # SmallHighMeemIsolatedForm missing
# "": "\u06E3", # SmallLowSeen missing
# "": "\u06E5", # SmallWaw missing
# "": "\u06E6", # SmallYa missing
# "": "\u06E8", # SmallHighNoon missing
# "": "\u06EA", # EmptyCentreLowStop missing
# "": "\u06EB", # EmptyCentreHighStop missing
# "": "\u06EC", # RoundedHighStopWithFilledCentre missing
# "": "\u06ED" # missing
}
ISO88596_TO_UNICODE = {
"C1": "\u0621", # hamza-on-the-line
"C2": "\u0622", # madda
"C3": "\u0623", # hamza-on-'alif
"C4": "\u0624", # hamza-on-waaw
"C5": "\u0625", # hamza-under-'alif
"C6": "\u0626", # hamza-on-yaa'
"C7": "\u0627", # bare 'alif
"C8": "\u0628", # baa'
"C9": "\u0629", # taa' marbuuTa
"CA": "\u062A", # taa'
"CB": "\u062B", # thaa'
"CC": "\u062C", # jiim
"CD": "\u062D", # Haa'
"CE": "\u062E", # khaa'
"CF": "\u062F", # daal
"D0": "\u0630", # dhaal
"D1": "\u0631", # raa'
"D2": "\u0632", # zaay
"D3": "\u0633", # siin
"D4": "\u0634", # shiin
"D5": "\u0635", # Saad
"D6": "\u0636", # Daad
"D7": "\u0637", # Taa'
"D8": "\u0638", # Zaa' (DHaa')
"D9": "\u0639", # cayn
"DA": "\u063A", # ghayn
"E0": "\u0640", # taTwiil missing
"E1": "\u0641", # faa'
"E2": "\u0642", # qaaf
"E3": "\u0643", # kaaf
"E4": "\u0644", # laam
"E5": "\u0645", # miim
"E6": "\u0646", # nuun
"E7": "\u0647", # haa'
"E8": "\u0648", # waaw
"E9": "\u0649", # 'alif maqSuura
"EA": "\u064A", # yaa'
"EB": "\u064B", # fatHatayn
"EC": "\u064C", # Dammatayn
"ED": "\u064D", # kasratayn
"EE": "\u064E", # fatHa
"EF": "\u064F", # Damma
"F0": "\u0650", # kasra
"F1": "\u0651", # shaddah
"F2": "\u0652", # sukuun
# "": "\u0670", # dagger 'alif missing
# "": "\u0671", # waSla missing
# extended here
# "": "\u0653", # Maddah missing
# "": "\u0654", # HamzaAbove missing
# "": "\u06DC", # SmallHighSeen missing
# "": "\u06DF", # SmallHighRoundedZero missing
# """: "\u06E0", # SmallHighUprightRectangularZero missing
# "": "\u06E2", # SmallHighMeemIsolatedForm missing
# "": "\u06E3", # SmallLowSeen missing
# "": "\u06E5", # SmallWaw missing
# "": "\u06E6", # SmallYa missing
# "": "\u06E8", # SmallHighNoon missing
# "": "\u06EA", # EmptyCentreLowStop missing
# "": "\u06EB", # EmptyCentreHighStop missing
# "": "\u06EC", # RoundedHighStopWithFilledCentre missing
# "": "\u06ED" # missing
}
ROMANIZATION_SYSTEMS_MAPPINGS = {
"buckwalter": BUCKWALTER_TO_UNICODE,
"iso233-2": ISO2332_TO_UNICODE,
# "arabtex": ARABTEX_TO_UNICODE, todo: not ready
"asmo449": ASMO449_TO_UNICODE,
# "iso8859-6": ISO88596_TO_UNICODE, todo: not ready
}
def available_transliterate_systems():
    """Return the names of the romanization systems that can be transliterated."""
    return [system for system in ROMANIZATION_SYSTEMS_MAPPINGS]
def guess_romaization_system():
    """Guess which romanization system a text uses (not yet implemented).

    NOTE(review): the name looks like a typo for "guess_romanization_system";
    kept as-is because external callers may rely on it.
    """
    # @todo
    pass
def transliterate(mode, string, ignore="", reverse=False):
    # @todo: arabtex and iso8859-6 need individual handling because in some cases using one-two mapping
    """Encode or decode a string between a romanization system and Arabic script.

    :param mode: name of the romanization system (one of
        available_transliterate_systems()); an unsupported mode prints a
        warning and returns the string unchanged.
    :param string: the text to convert, character by character.
    :param ignore: characters passed through untouched even if mapped.
    :param reverse: if True, map Unicode Arabic back to the romanization.
    :return: the converted string.
    """
    if mode in available_transliterate_systems():
        mapping = ROMANIZATION_SYSTEMS_MAPPINGS[mode]
    else:
        # Preserve historical behaviour: warn on stdout and fall through
        # with an empty mapping so the input comes back unchanged.
        print(mode + " not supported! \n")
        mapping = {}
    if reverse:
        # Invert the table: unicode -> romanization
        mapping = {value: key for key, value in mapping.items()}
    result = ""
    for char in string:
        # `char in mapping` replaces the unidiomatic `char in mapping.keys()`
        if char in mapping and char not in ignore:
            result += mapping[char]
        else:
            result += char
    return result
| {
"repo_name": "D-K-E/cltk",
"path": "src/cltk/phonology/arb/romanization.py",
"copies": "4",
"size": "12272",
"license": "mit",
"hash": -7319602476410622000,
"line_mean": 30.3820512821,
"line_max": 109,
"alpha_frac": 0.4839447667,
"autogenerated": false,
"ratio": 2.3356870229007636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9818601863950982,
"avg_score": 0.00020598512995650744,
"num_lines": 390
} |
# Aradiabot function for searching rule34.xxx
# As they don't have an API, this was easier to put in it's own file so I could organize everything.
import requests
from html.parser import HTMLParser
import random
import sys
# Values fed to rule34.xxx's `pid` parameter, tried in order --
# NOTE(review): presumably result-page offsets; confirm against the site.
counter = [10,9,8,7,6,5,4,3,2,1]
# Post links collected by booruparser while scanning the listing page
images = []
class booruparser(HTMLParser):
    """Scrapes a rule34 listing page: for every <a> tag that carries an 'id'
    attribute, appends its second attribute value (the post link) to the
    module-level `images` list."""
    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            if any('id' in pairs for pairs in attrs):
                try:
                    images.append(str(attrs[1][1]))
                except IndexError:
                    # Anchor with fewer than two attributes: nothing to collect.
                    # (BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    pass
class imageparser(HTMLParser):
    """Prints the direct image URL found on a rule34 post page."""
    def handle_starttag(self, tag, attrs):
        # The post page's image tag has id="image"; attrs[2][1] is assumed to
        # be the scheme-relative src value -- NOTE(review): this depends on
        # exact attribute ordering in the served HTML, confirm before changing.
        if ('id', 'image') in attrs:
            print("http:" + attrs[2][1])
# Module-level script: build a tag query from the CLI arguments, scrape a
# listing page for post links, then print the image URL of a random post
# (or "0" when nothing was found -- the caller in booru.rule34 checks this).
parser = booruparser()
imgparser = imageparser()
tags = ""
# Join every argument except argv[0] into a "tag1+tag2+" query string
for arg in sys.argv:
    if arg == sys.argv[0]:
        pass
    else:
        tags = tags + arg + "+"
count = 0
# Try up to 10 listing pages (offsets from `counter`) until a post shows up
while len(images) < 1:
    if count < 10:
        parser.feed(requests.get('http://rule34.xxx/index.php?page=post&s=list&tags=' + tags + '&pid=' + str(counter[count])).text)
        count = count + 1
    else:
        break
if count != 10:
    # Found at least one post: fetch it and let imageparser print its URL
    image = requests.get('http://rule34.xxx/' + random.choice(images)).text
    imgparser.feed(image)
else:
    # All pages exhausted with no results
    print("0")
| {
"repo_name": "Loreleix64/aradiabot",
"path": "r34.py",
"copies": "1",
"size": "1109",
"license": "mit",
"hash": -745223523102354300,
"line_mean": 21.6326530612,
"line_max": 125,
"alpha_frac": 0.65374211,
"autogenerated": false,
"ratio": 2.821882951653944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3975625061653944,
"avg_score": null,
"num_lines": null
} |
# Aradiabot functions for searching through various image sharing websites.
import requests
import random
import subprocess
import json
import asyncio
# Browser-like User-Agent so the image boards don't reject automated requests
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}
async def mom():
    """Fetch a random yakumo_ran picture URL from danbooru."""
    page = random.randint(1, 200)
    listing = requests.get("https://danbooru.donmai.us/posts.json?tags=yakumo_ran+1girl&page=" + str(page) + "&limit = 20")
    table = listing.json()
    post = table[random.randint(0, 19)]
    return "https://danbooru.donmai.us/" + str(post["file_url"])
async def e621(tags):
    """Return a random e621 file URL matching `tags`, or "No results!"."""
    query = "".join(word + "+" for word in tags.split(" "))
    response = requests.get("https://e621.net/post/index.json?tags=" + query + "&limit=320", headers=headers).json()
    try:
        return random.choice(response)["file_url"]
    except:
        return "No results!"
async def danbooru(tags):
    """Return a random danbooru post URL matching `tags`, or "No results!"."""
    query = "".join(word + "+" for word in tags.split(" "))
    response = requests.get("https://danbooru.donmai.us/posts.json?tags=" + query + "&limit=200", headers=headers).json()
    try:
        return "https://danbooru.donmai.us/" + random.choice(response)["file_url"]
    except:
        return "No results!"
async def derpibooru(tags):
    """Return a random derpibooru image URL matching `tags`, or "No results!".

    The derpibooru API requires a personal key; fill in `api_key` below.
    """
    api_key = ""  # Please supply your own derpibooru API key!
    # BUG FIX: the original put the "supply your own key" comment *inside*
    # the dict literal, commenting out the rest of the line and leaving the
    # file with a SyntaxError.
    response = requests.get("https://derpibooru.org/search.json?q=" + tags,
                            params={'key': api_key}, headers=headers).json()
    try:
        return "https:" + random.choice(response["search"])["image"]
    except (KeyError, IndexError):
        # No "search" key in the payload, or an empty result list.
        return "No results!"
async def rule34(tags):
    """Search rule34.xxx via the helper script r34.py (the site has no API)."""
    args = ['python3.5', 'r34.py']  # This worked a lot better in it's own file.
    args.extend(tags.split(" "))
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    output = proc.stdout.read().decode()
    print(output)
    # r34.py prints "0" when it finds nothing
    if output.strip() == "0":
        return "No results!"
    return output
| {
"repo_name": "Loreleix64/aradiabot",
"path": "booru.py",
"copies": "1",
"size": "1777",
"license": "mit",
"hash": -4047175464088835600,
"line_mean": 32.5283018868,
"line_max": 156,
"alpha_frac": 0.6837366348,
"autogenerated": false,
"ratio": 2.7984251968503937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3982161831650394,
"avg_score": null,
"num_lines": null
} |
# Aradiabot image glitching functions.
# Transcribed over from my 'fastglitch' repository.
from io import BytesIO, StringIO
import random, sys, PIL.Image, PIL.ImageChops, PIL.ImageDraw, os
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
import asyncio
def genImg(fname):
    """Load `fname`, run the full glitch pipeline with random strengths,
    and save the result as 'new<fname>.png'."""
    img = PIL.Image.open(fname)
    img = img.convert('RGBA')
    # Pipeline: corrupt raw bytes, offset RGB channels, wrap-shift the image,
    # overlay artifact rectangles, slice rows sideways, then sprinkle noise.
    # NOTE(review): RandomByteAddition re-saves as JPEG; newer Pillow refuses
    # to save RGBA as JPEG -- confirm against the Pillow version in use.
    proto1 = RandomByteAddition(img, random.randint(1,16))
    proto2 = RGBOffset(proto1, random.randint(1,64))
    proto3 = PixelOffset(proto2, random.randint(1,512))
    proto4 = Artifact(proto3, random.randint(1,64))
    proto5 = RowSlice(proto4, random.randint(1,32))
    proto6 = Noise(proto5, random.randint(25000,50000))
    p = proto6.convert('RGB')
    p.save('new' + fname + '.png')
    # Release the intermediate images explicitly
    proto1.close()
    proto2.close()
    proto3.close()
    proto4.close()
    proto5.close()
    proto6.close()
def RandomByteAddition(image, seed):
    """Corrupt the JPEG-encoded bytes of `image` at up to `seed` random
    positions, stopping as soon as the stream no longer decodes."""
    bytesBroken = False
    bytesobj = BytesIO()
    image.save(bytesobj, 'jpeg')
    iter = seed
    # Skip the first 1 KiB so the JPEG header survives the corruption
    bytesobj.seek(1024)
    if seed > 0:
        for x in range(0, iter):
            # Keep a handle to the buffer before this round of damage.
            # NOTE(review): bytes2 is the *same object* as bytesobj, so this
            # does not actually preserve the pre-corruption state -- confirm
            # whether that was intended.
            bytes2 = bytesobj
            bytesobj.seek(random.randint(0, 32), 1)
            byte = random.choice(chars)
            bytesobj.write(bytes(byte, 'utf-8'))
            try:
                # Probe whether the corrupted stream still decodes
                PIL.Image.open(bytesobj)
            except:
                bytesBroken = True
                break
    if bytesBroken == True:
        bytes2.seek(0)
        new_img = PIL.Image.open(bytes2)
    else:
        bytesobj.seek(0)
        new_img = PIL.Image.open(bytesobj)
    return new_img
def RGBOffset(image, distance):
    """Shift the red and blue channels horizontally in opposite directions
    to produce a chromatic-aberration look."""
    shift = distance * 30
    red, green, blue = image.split()
    red = PIL.ImageChops.offset(red, -shift, 0)
    blue = PIL.ImageChops.offset(blue, shift, 0)
    return PIL.Image.merge('RGB', (red, green, blue))
def PixelOffset(image, distance):
    """Wrap-shift the whole image by `distance` pixels."""
    return PIL.ImageChops.offset(image, distance)
def RowSlice(image, sliceamount):
    """Shift up to `sliceamount` horizontal bands sideways by random amounts
    (mutates `image` in place via paste, then returns it)."""
    cps = 0  # running top edge of the next band
    new_img = image
    for x in range(sliceamount):
        upbound = cps
        downbound = upbound + random.randint(16, 128)
        if downbound > image.height:
            # Next band would run off the bottom: stop slicing
            break
        box = (0,
               upbound,
               new_img.width,
               downbound)
        reigon = new_img.crop(box)
        distance = random.randint(-128, 128)
        # Wrap the band horizontally and paste it back where it came from
        reigon = PIL.ImageChops.offset(reigon, distance, 0)
        new_img.paste(reigon, box)
        reigon.close()
        cps = downbound
    return new_img
def Artifact(image, screwamount):
    """Overlay up to `screwamount` random translucent rectangles, sweeping
    left to right, and return the composited RGBA image."""
    tnspimg = image.convert('RGBA')
    # Fully transparent layer the rectangles are drawn onto
    base = PIL.Image.new('RGBA', tnspimg.size, (255, 255, 255, 0))
    rows = PIL.ImageDraw.Draw(base)
    cps = 0  # running left edge of the next column band
    for x in range(screwamount):
        leftbound = cps
        rightbound = leftbound + random.randint(32, 128)
        if rightbound > image.width:
            # Next band would run off the right edge: stop
            break
        y1 = random.randint(0, image.height - int(round(image.height / 2.0, 0)))
        x1 = random.randint(leftbound, rightbound - 1)
        y2 = random.randint(y1, image.height)
        x2 = rightbound
        # Random colour with partial alpha so the source shows through
        color = (random.randint(0, 255),
                 random.randint(0, 255),
                 random.randint(0, 255),
                 random.randint(64, 200))
        rows.rectangle((x1,
                        y1,
                        x2,
                        y2), fill=color)
        cps = rightbound
    new_img = PIL.Image.alpha_composite(tnspimg, base)
    return new_img
def Noise(image, pixels):
    """Scatter `pixels` - 1 random RGB dots across the image (in place)."""
    for _ in range(1, pixels):
        # Keep the randint call order identical: x, y, then the three channels
        x = random.randint(1, image.width - 1)
        y = random.randint(1, image.height - 1)
        color = (random.randint(1, 255), random.randint(1, 255), random.randint(1, 255))
        image.putpixel((x, y), color)
    return image
| {
"repo_name": "Loreleix64/aradiabot",
"path": "glitch.py",
"copies": "1",
"size": "3186",
"license": "mit",
"hash": -2911874073598982000,
"line_mean": 24.9024390244,
"line_max": 165,
"alpha_frac": 0.6930320151,
"autogenerated": false,
"ratio": 2.688607594936709,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3881639610036709,
"avg_score": null,
"num_lines": null
} |
# Aradiabot main file + misc functions.
import discord
import asyncio
import random
import tweepy
import json
import booru
import os
import re
import glitch
import requests
import image
#Please provide your own twitter api keys if you want to make use of the tweeting function.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
# Twitter client used by the $tweet command
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
tapi = tweepy.API(auth)
# Discord connection; the token is supplied to client.run() at the bottom
client = discord.Client()
# Browser-like User-Agent for downloading user-supplied attachments
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}
# Most of the things in the loot tables were inside jokes.
# Feel free to feel these in yourself.
lootPrefix=["Ancient"]
lootItem=["Sword"]
lootSuffix=["of Fire"]
def lootgen():
    """Assemble a random loot name from the prefix/item/suffix tables."""
    prefix = random.choice(lootPrefix)
    item = random.choice(lootItem)
    suffix = random.choice(lootSuffix)
    return prefix + " " + item + " " + suffix
async def tweet(msg, message):
    """Post `msg` (and at most one image attachment) to Twitter.

    Discord user mentions (<@id> / <@!id>) are replaced with @username
    before posting; messages are truncated to Twitter's 140-char limit.
    Returns a status string to report back to the Discord channel.
    """
    r = re.search(r'<@(.*)>', msg)
    r2 = re.search(r'<@!(.*)>', msg)
    # BUG FIX: the original iterated r.groups() whenever *either* pattern hit
    # and re-substituted the <@...> pattern in the nickname branch instead of
    # <@!...>; handle the two mention forms independently.
    if r:
        for group in r.groups():
            for u in message.channel.server.members:
                if str(group) == str(u.id):
                    msg = re.sub(r'<@(.*)>', "@" + str(u.name), msg)
    if r2:
        for group in r2.groups():
            for u in message.channel.server.members:
                if str(group) == str(u.id):
                    msg = re.sub(r'<@!(.*)>', "@" + str(u.name), msg)
    # Download a single image attachment, if present, to tweet alongside
    imsg = False
    print(message.attachments)
    if len(message.attachments) == 1:
        j = message.attachments[0]
        if j["filename"].lower().endswith(('.jpg', '.jpeg', '.png', '.gif')):
            import urllib.request
            fname = j["filename"]
            req = urllib.request.Request(j["url"], None, headers)
            html = urllib.request.urlopen(req).read()
            with open(fname, 'wb') as f:
                f.write(html)
            imsg = True
    try:
        # Truncate once, then post -- the original duplicated the whole
        # media/status branch on both sides of the length check.
        if len(msg) > 140:
            msg = msg[:140]  # old Twitter character limit
        if imsg:
            tapi.update_with_media(fname, status=msg)
            os.remove(fname)
        else:
            tapi.update_status(msg)
        return "Your message has been sent."
    except tweepy.TweepError as e:
        print(json.loads(e.response.text))
        return json.loads(e.response.text)['errors'][0]['message']
async def loot():
    """Generate a loot-drop message for the $loot command."""
    item = lootgen()
    return f"You obtain the '{item}'! Beep boop."
async def joke():
    """Fetch a random joke from the tambal joke API."""
    payload = requests.get("http://tambal.azurewebsites.net/joke/random").json()
    return str(payload["joke"])
async def imgglitch(message):
    """Download a single image attachment, glitch it via glitch.genImg, and
    post the result back to the channel; complain if no attachment was sent."""
    if len(message.attachments) == 1:
        j = message.attachments[0]
        print("Validating!")
        if j["filename"].lower().endswith(('.jpg', '.jpeg', '.png', '.gif')):
            print("Downloading!")
            import urllib.request
            fname = j["filename"]
            req = urllib.request.Request(j["url"], None, headers)
            html = urllib.request.urlopen(req).read()
            with open(fname, 'wb') as f:
                f.write(html)
            print("Glitching!" + fname)
            glitch.genImg(fname)
            print("Sending!")
            await client.send_file(message.channel, 'new' + fname + '.png')
            print("Removing!")
            # Clean up both the glitched output and the original download
            os.remove('new' + fname + '.png')
            os.remove(fname)
    else:
        # BUG FIX: send_message is a coroutine and was never awaited, so the
        # error reply was silently dropped.
        await client.send_message(message.channel, "You did not upload an image, or it is corrupt!")
async def liquid(message):
    """Download a single image attachment, liquid-rescale it via image.liquid,
    and post the result back to the channel; complain if nothing was sent."""
    if len(message.attachments) == 1:
        j = message.attachments[0]
        print("Validating!")
        if j["filename"].lower().endswith(('.jpg', '.jpeg', '.png', '.gif')):
            print("Downloading!")
            import urllib.request
            fname = j["filename"]
            req = urllib.request.Request(j["url"], None, headers)
            html = urllib.request.urlopen(req).read()
            with open(fname, 'wb') as f:
                f.write(html)
            print("Glitching!" + fname)
            image.liquid(fname)
            print("Sending!")
            await client.send_file(message.channel, 'liqnew' + fname)
            print("Removing!")
            # Clean up both the processed output and the original download
            os.remove('liqnew' + fname)
            os.remove(fname)
    else:
        # BUG FIX: send_message is a coroutine and was never awaited, so the
        # error reply was silently dropped.
        await client.send_message(message.channel, "You did not upload an image, or it is corrupt!")
@client.event
async def on_ready():
    """Print the bot's identity once the Discord connection is ready."""
    for line in ('Logged in as', client.user.name, client.user.id, '------'):
        print(line)
@client.event
async def on_message(message):
    """Command dispatcher: routes '$'-prefixed chat commands to bot features."""
    #Booru commands, listed in booru.py
    if message.content.startswith('$mom'):
        result = await booru.mom()
        await client.send_message(message.channel, result)
    elif message.content.startswith('$e621'):
        # The slice strips the command prefix, leaving only the tag string
        result = await booru.e621(message.content[5:])
        await client.send_message(message.channel, result)
    elif message.content.startswith('$danbooru'):
        result = await booru.danbooru(message.content[9:])
        await client.send_message(message.channel, result)
    elif message.content.startswith('$derpibooru'):
        result = await booru.derpibooru(message.content[11:])
        await client.send_message(message.channel, result)
    elif message.content.startswith('$r34'):
        result = await booru.rule34(message.content[4:])
        await client.send_message(message.channel, result)
    #Image handling functions, listed in glitch.py and image.py
    elif message.content.startswith('$glitch'):
        await imgglitch(message)
    elif message.content.startswith('$liquid'):
        await liquid(message)
    #Miscellanious commands.
    elif message.content.startswith('$joke'):
        joked = await joke()
        await client.send_message(message.channel, joked)
    elif message.content.startswith('$loot'):
        pickedup = await loot()
        await client.send_message(message.channel, pickedup)
    elif message.content.startswith('$tweet'):
        returnstatus = await tweet(message.content[6:], message)
        await client.send_message(message.channel, returnstatus)
    elif message.content.startswith('$help'):
        # BUG(review): `help` here is the Python builtin, so this sends the
        # builtin's repr -- a help text string was probably intended.
        await client.send_message(message.channel, help)
# Start the bot (blocking call); supply the Discord bot token here.
client.run("Here's where your discord client id goes")
| {
"repo_name": "Loreleix64/aradiabot",
"path": "main.py",
"copies": "1",
"size": "5935",
"license": "mit",
"hash": 5225665439705571000,
"line_mean": 31.9722222222,
"line_max": 171,
"alpha_frac": 0.6775063184,
"autogenerated": false,
"ratio": 3.0703569580962236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4247863276496224,
"avg_score": null,
"num_lines": null
} |
"""A radial distribution function observer."""
import numpy
import asap3
from asap3.Internal.Subject import Subject
from asap3.Internal.ListOfElements import ListOfElements
import cPickle
import new
import sys
class RadialDistributionFunction(Subject):
    # NOTE: this module is Python 2 code (print statements, cPickle, `new`);
    # it will not import under Python 3 without porting.
    def __init__(self, atoms, rMax, nBins, groups=None, interval=1, average=1,
                 autoclear=False, verbose=False):
        """Create a RadialDistributionFunction observer.
        Arguments:
        atoms: The atoms being observed.
        rMax: The maximum distance in the RDF.
        nBins: The number of bins in the histograms.
        groups (optional): A non-negative integer per atom, used to
            classify atoms into groups, the RDF is calculated for each
            group.
        interval (optional, default=1): How often should the RDF be
            calculated. DEPRECATED: Use interval argument when attaching
            to dynamics instead.
        average (optional, default=1): How many times should the RDF
            be calculated and averaged before any observer of this object
            is notified and/or the RDF is saved to a file.
        autoclear (optional, default=False): Should the RDF be cleared
            after the RDF has been processed by observers and/or written
            to a file? The default is to continue to accumulate data.
        """
        Subject.__init__(self)
        self.atoms = atoms
        self.natoms = len(atoms)
        self.globalrdf = None
        self.rMax = rMax * 1.0 # Guard against integer divisions.
        self.nBins = nBins
        self.dr = self.rMax / nBins  # bin width
        self.interval = interval
        self.average = average
        self.autoclear = autoclear
        self.verbose = verbose
        self.autosave = False
        self.n1 = 0 # Counter associated with interval
        self.n2 = 0 # Counter associated with average
        self.clearnext = False
        self.countRDF = 0  # number of RDFs accumulated since last clear
        self.listOfElements = ListOfElements(atoms)
        if groups is None:
            # No grouping requested: one group containing every atom
            self.groups = numpy.zeros(len(atoms), numpy.int32)
            self.ngroups = 1
        else:
            if groups.shape != (len(atoms),):
                raise ValueError, "groups must be an integer per atom"
            if min(groups) < 0 or max(groups) >= len(atoms):
                raise ValueError, "groups array is unreasonable"
            self.groups = groups.astype(numpy.int32)
            self.ngroups = max(self.groups) + 1
    def update(self, atoms=None):
        """Calculate the RDF of the atoms.
        Make an RDF calculation now (or if interval=n was specified,
        every n time this method is called.
        If average = 1, then any observers of this object is notified.
        If average > 1, the calculated RDFs are accumulated, and once
        a sufficient number of RDFs have been accumulated, the
        observers are notified and the averaged RDFs are ready.
        """
        if atoms is not None:
            self.atoms = atoms
        self.n1 += 1
        if self.n1 >= self.interval:
            if self.clearnext:
                # A previous notification round requested autoclear
                self.clear()
            self.n1 = 0
            self.do_rdf()
            self.n2 += 1
            if self.n2 >= self.average:
                self.clearnext = self.autoclear
                self.call_observers() # I have data ready
                if self.autosave:
                    self.save()
                self.n2 = 0
    def do_rdf(self):
        """Do the actual RDF calculation. Do not call directly."""
        if self.verbose:
            print >>sys.stderr, "Calculating RDF"
        assert self.natoms == len(self.atoms)
        # C++ helper returns (global histogram, per-group per-pair
        # histograms, per-group per-element atom counts)
        grdf, rdfs, cnts = asap3._asap.RawRDF(self.atoms, self.rMax, self.nBins,
                                              self.groups, self.ngroups,
                                              self.listOfElements)
        ###
        # Debugging output (left in by the original author)
        print 'Natoms:', self.natoms
        print 'Global RDF count:', grdf.sum()
        for g in range(len(rdfs)):
            print 'Group:', g
            for e in rdfs[g].keys():
                print ' RDF count %s: %i' % (e, rdfs[g][e].sum())
            for e in cnts[g].keys():
                print ' Count (%i): %i' % (e, cnts[g][e])
        ###
        # Sanity check on counts
        c = 0
        for cnt in cnts: # Loop over groups
            for t in cnt.keys(): # Loop over elements
                c += cnt[t]
        assert c == self.natoms
        # Now store the data
        if self.globalrdf is None:
            # First calculation since clear(): adopt the raw arrays
            self.globalrdf = grdf
            self.rdfs = rdfs
            self.atomcounts = cnts
            # Ensure that all elements are represented
            for i in range(len(rdfs)): # Loop over groups
                for e in self.listOfElements: # Loop over elements
                    if not self.atomcounts[i].has_key(e):
                        self.atomcounts[i][e] = 0
        else:
            # Accumulate into the running totals
            self.globalrdf += grdf
            for i in range(len(rdfs)): # Loop over groups
                for t in rdfs[i].keys(): # Loop over pairs
                    self.rdfs[i][t] += rdfs[i][t]
                for t in cnts[i].keys(): # Loop over elements
                    self.atomcounts[i][t] += cnts[i][t]
        self.countRDF += 1
        self.volume = self.atoms.get_volume()
        if self.verbose:
            print >>sys.stderr, "RDF done!"
    def clear(self):
        """Clear the accumulated RDFs. Called automatically."""
        if self.verbose:
            print >>sys.stderr, "Clearing RDF"
        self.globalrdf = self.rdfs = self.atomcounts = None
        self.countRDF = 0
    def get_rdf(self, groups=None, elements=None, normalize='volume'):
        """Get an RDF.
        Arguments (both optional):
        groups: Only get the RDF for atoms in these groups. Can either be an
        integer or a list integer refering to the groups (default: all groups)
        elements: Get the partial RDF for these two elements.
            elements must be a tuple of two atomic numbers (a, b), the
            returned RDF tells how many b neighbors an a atom has.
        If get_rdf is called on a newly created
        RadialDistributionFunction object, and this object has been
        created without specifying interval and average parameters, it
        is assumed that the user is not using the object as an
        observer, but wants an immediate calculation of the RDF. In
        that case, calling get_rdf triggers the calculation of the
        RDFs. In all other cases previously stored RDFs are returned.
        """
        if self.globalrdf is None and self.interval == 1 and self.average == 1:
            # Immediate-use mode: compute on demand
            self.update()
        if groups is None and elements is None:
            # Return global RDF
            return self.normalize(self.globalrdf,
                                  self.countRDF * self.natoms,
                                  normalize)
        # Either a group or a pair of elements have been specified.
        # Sum over selected groups
        if groups is None:
            groups = range(len(self.rdfs))
        else:
            if not isinstance(groups, list):
                groups = [groups]
        rdfs = self.rdfs[groups[0]].copy() # Shallow copy
        atomcounts = self.atomcounts[groups[0]].copy() # Shallow copy
        for i in groups[1:]: # Loop over groups
            if i >= len(self.rdfs):
                continue
            for t in self.rdfs[i].keys(): # Loop over pairs
                rdfs[t] += self.rdfs[i][t]
            for t in self.atomcounts[i].keys(): # Loop over elements
                atomcounts[t] += self.atomcounts[i][t]
        # Sum over selected element pairs
        elementpairs = elements
        if elementpairs is None:
            elements = self.listOfElements
            elementpairs = rdfs.keys()
        else:
            if isinstance(elementpairs, tuple):
                elements = [elementpairs[0]]
                elementpairs = [elementpairs]
            elif isinstance(elementpairs, int):
                # Single element given: pair it with every known element
                elements = [elementpairs]
                elementpairs = []
                for e1 in self.listOfElements:
                    elementpairs.append((elements[0], e1))
            else:
                raise ValueError('Elements must be either an interger or ' +
                                 'a tuple of two integers.')
        rdf = None
        for pair in elementpairs:
            if rdf is None:
                rdf = numpy.array(rdfs[pair])
            else:
                rdf += rdfs[pair]
        # The atomcounts should always be summed over elements!
        atomcount = None
        for t in atomcounts.keys():
            if atomcount is None:
                atomcount = atomcounts[t]
            else:
                atomcount += atomcounts[t]
        ###
        # NOTE(review): this debugging recount over the *selected* elements
        # overwrites the total computed just above -- confirm which is intended.
        atomcount = None
        for e in elements:
            if atomcount is None:
                atomcount = atomcounts[e]
            else:
                atomcount += atomcounts[e]
        print 'Number of selected atoms:', atomcount
        ###
        return self.normalize(rdf, atomcount, normalize)
    def normalize(self, rdf, ncount, type):
        """Normalize the raw RDF returned by the C++ module."""
        if type == 'volume':
            # Ideal-gas normalization: expected counts in each spherical shell
            factor = (4 * numpy.pi / 3.0) * ncount * self.natoms / self.volume
            r = numpy.arange(self.nBins) * self.dr
            r3low = r * r * r
            r += self.dr
            r3high = r * r * r
            normalization = factor * (r3high - r3low)
        elif type == 'atoms':
            normalization = 1.0 * ncount
        else:
            normalization = 1.0
        return rdf / normalization
    # Saving and loading RDF's
    def output_file(self, prefix):
        """Give the file prefix for saving RDFs, and turn on saving."""
        self.autosave = True
        self.savefilecounter = 0
        if "%" in prefix:
            # Assume the user knows what (s)he is doing
            self.filenames = prefix
        else:
            self.filenames = prefix + "%04d.rdf"
    def save(self, filename=None):
        """Pickle the accumulated RDF data to filename (or the next autosave file)."""
        if self.verbose:
            print >>sys.stderr, "Saving RDF"
        if filename == None:
            filename = (self.filenames % (self.savefilecounter,))
            self.savefilecounter += 1
        data = {"globalrdf": self.globalrdf,
                "rdfs" : self.rdfs,
                "atomcounts": self.atomcounts,
                "countRDF": self.countRDF,
                "rMax": self.rMax,
                "dr": self.dr,
                "nBins": self.nBins,
                "natoms": self.natoms,
                "volume": self.volume}
        f = open(filename, "w")
        cPickle.dump(data, f, 2) # Protocol 2 (efficient bin from Python 2.3)
        f.close()
    # Not supported in python 2.3: @classmethod
    #@classmethod
    def load(cls, filename):
        """Rebuild an instance from a file written by save()."""
        f = open(filename)
        data = cPickle.load(f)
        f.close()
        # Build an instance without calling __init__, using the pickled
        # dict as the instance __dict__ (old-style `new` module trick)
        obj = new.instance(cls, data)
        return obj
    load=classmethod(load)
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/analysis/rdf2.py",
"copies": "1",
"size": "11138",
"license": "mit",
"hash": -1975056889259459000,
"line_mean": 36.3758389262,
"line_max": 80,
"alpha_frac": 0.546507452,
"autogenerated": false,
"ratio": 4.243047619047619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0063846764257118535,
"num_lines": 298
} |
"""AraGenoSite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers, serializers, viewsets
from rest_framework.urlpatterns import format_suffix_patterns
import arageno.views as views
import arageno.rest as rest
admin.autodiscover()
# Matches a canonical lowercase 8-4-4-4-12 UUID in URL paths
UUID_REGEX = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'identify', rest.GenotypeSubmissionViewSet)
urlpatterns = [
    url(r'^$', views.index, name="index"),
    url(r'^faq/', views.faq, name="faq"),
    url(r'^about/', views.about, name="about"),
    url(r'^identify/$', views.upload_genotype, name="upload_genotype"),
    url(r'^identify/(?P<pk>%s)/$' % UUID_REGEX, views.GenotypeSubmissionInfo.as_view(), name="genotype_submission_result"),
    url(r'^identify/(?P<pk>%s)/delete/$' % UUID_REGEX, views.GenotypeSubmissionDeleteView.as_view(), name="delete_submission"),
    url(r'^api-auth/', include('rest_framework.urls')),
    url(r'^api/', include(router.urls))
]
# REST endpoints that honour content-negotiation suffixes (.json, .zip, ...)
restpatterns = [
    url(r'^api/identify/(?P<pk>%s)/jobs/(?P<job_id>(\d+))/plot/$' % UUID_REGEX, rest.plot_crosses_windows, name="crosses_plot"),
    url(r'^api/identify/(?P<pk>%s)/jobs/(?P<job_id>(\d+))/download/$' % UUID_REGEX, rest.download, name="download"),
]
restpatterns = format_suffix_patterns(restpatterns, allowed=['json','zip','png','pdf'])
urlpatterns += restpatterns
| {
"repo_name": "Gregor-Mendel-Institute/AraGeno",
"path": "AraGenoSite/urls.py",
"copies": "1",
"size": "2122",
"license": "mit",
"hash": -4243218941289104000,
"line_mean": 42.306122449,
"line_max": 128,
"alpha_frac": 0.6936852026,
"autogenerated": false,
"ratio": 3.1671641791044776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9352927901284571,
"avg_score": 0.0015842960839812733,
"num_lines": 49
} |
"""aragwas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter, DynamicListRoute, DynamicDetailRoute, Route
from rest_framework.urlpatterns import format_suffix_patterns
from gwasdb import views
import gwasdb.rest as rest
class SearchRouter(DefaultRouter):
    """DRF router for the search viewset: plain list/detail routes plus
    dynamic routes that accept an extra `query_term` URL component."""
    routes = [
        Route(
            url=r'^{prefix}$',
            mapping={'get': 'list'},
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ),
        Route(
            url=r'^{prefix}/{lookup}$',
            mapping={'get': 'retrieve'},
            name='{basename}-detail',
            initkwargs={'suffix': 'Detail'}
        ),
        # Detail-routed methods, e.g. /search/<method>/<query_term>/
        DynamicDetailRoute(
            url=r'^{prefix}/{methodname}/(?P<query_term>[^/.]+)/$',
            name='{basename}-{methodname}',
            initkwargs={}
        ),
        # List-routed methods, e.g. /search/<method>/
        DynamicListRoute(
            url=r'^{prefix}/{methodname}/$',
            name='{basename}-{methodname}',
            initkwargs={}
        )
    ]
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'genotypes', rest.GenotypeViewSet)
router.register(r'studies', rest.StudyViewSet)
router.register(r'phenotypes', rest.PhenotypeViewSet)
router.register(r'associations', rest.AssociationViewSet, base_name="associations")
router.register(r'koassociations', rest.KOAssociationViewSet, base_name="koassociations")
router.register(r'genes', rest.GeneViewSet, base_name="genes")
router.register(r'snps', rest.SNPViewSet, base_name="snps")
# Search endpoints use the custom SearchRouter with its query_term routes
srouter = SearchRouter()
srouter.register(r'search', rest.SearchViewSet)
from gwasdb.custom_documentation import include_custom_docs_urls
urlpatterns = [
    url(r'^$', views.index, name="index"),
    url(r'^admin/', admin.site.urls),
    # url(r'^docs/', include_docs_urls(title="AraGWAS API", description="REST API for AraGWAS")),
    url(r'^docs/', include_custom_docs_urls(title="AraGWAS API", description="REST API for AraGWAS")),
    url(r'^api/', include(router.urls, namespace="router_apis")),
    url(r'^api/', include(srouter.urls)),
    url(r'^api/version/$',rest.ApiVersionView.as_view(),name='api-version')
]
# for custom REST API endpoints (search, etc)
restpatterns = [
    #search
    # url(r'^api/search/search_results/$', ),
]
restpatterns = format_suffix_patterns(restpatterns, allowed=['json','zip','png','pdf'])
urlpatterns += restpatterns
| {
"repo_name": "1001genomes/AraGWAS",
"path": "aragwas_server/aragwas/urls.py",
"copies": "1",
"size": "3129",
"license": "mit",
"hash": -6605992602043151000,
"line_mean": 34.5568181818,
"line_max": 102,
"alpha_frac": 0.6657078939,
"autogenerated": false,
"ratio": 3.6898584905660377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9782121685454529,
"avg_score": 0.014688939802301716,
"num_lines": 88
} |
"""Arakhne
===============================================================================
============================== Arakhne Text Loom ==============================
===============================================================================
Arakhne makes the scrubbing and analysis of mass volumes of texts accessible to
the Ancient Scholar with minimal Python training. The goal of Arakhne is to
allow the user to perform the greatest number of changes in the fewest number
of commands possible, all while maintaining semantic clarity.
For more information, see
https://github.com/thePortus/arakhne
"""
import sys
from pkg_resources import get_distribution
from .setup import Setup
from .corpus import Corpus
if sys.version_info[0] != 3:
raise ImportError('Python Version 3 or above is required for cltk.')
__author__ = 'David J. Thomas'
__copyright__ = 'Copyright (c) 2016 David J. Thomas. Distributed and Licensed under the MIT License.'
__description__ = __doc__
__license__ = 'MIT'
__url__ = 'http://github.com/thePortus/arakhne'
__version__ = get_distribution('arakhne').version
def Arakhne(language=None):
    """Factory entry point: build and return an empty Corpus for `language`."""
    # Setup is currently disabled:
    # if not Setup(language=language).setup():
    #     raise Exception('Problem encountered during setup.')
    corpus = Corpus(language)
    return corpus.make()
# Cleaning up namespaces: drop import-time helpers so they are not
# re-exported as part of the package's public API
del get_distribution
del sys
| {
"repo_name": "thePortus/arakhne",
"path": "arakhne/__init__.py",
"copies": "1",
"size": "1396",
"license": "mit",
"hash": 510635964971069600,
"line_mean": 28.0833333333,
"line_max": 101,
"alpha_frac": 0.6210601719,
"autogenerated": false,
"ratio": 4.093841642228739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214901814128738,
"avg_score": null,
"num_lines": null
} |
"""Aramis."""
# --- import --------------------------------------------------------------------------------------
import os
import struct
import pathlib
import warnings
import numpy as np
from ._data import Data
from .. import exceptions as wt_exceptions
# --- define --------------------------------------------------------------------------------------
__all__ = ["from_Aramis"]
# --- from function -------------------------------------------------------------------------------
def from_Aramis(filepath, name=None, parent=None, verbose=True) -> Data:
    """Create a data object from Horiba Aramis ngc binary file.
    Parameters
    ----------
    filepath : path-like
        Path to .ngc file.
        Can be either a local or remote file (http/ftp).
        Can be compressed with gz/bz2, decompression based on file name.
    name : string (optional)
        Name to give to the created data object. If None, name is extracted from file.
        Default is None.
    parent : WrightTools.Collection (optional)
        Collection to place new data object within. Default is None.
    verbose : boolean (optional)
        Toggle talkback. Default is True.
    Returns
    -------
    data
        New data object(s).
    """
    # parse filepath
    filestr = os.fspath(filepath)
    filepath = pathlib.Path(filepath)
    if not ".ngc" in filepath.suffixes:
        wt_exceptions.WrongFileTypeWarning.warn(filepath, ".ngc")
    # np.DataSource transparently handles remote (http/ftp) and compressed
    # files; None means no local cache directory.
    ds = np.DataSource(None)
    f = ds.open(filestr, "rb")
    # Magic headers below were evidently determined from sample files;
    # a mismatch only warns and parsing is attempted regardless.
    header = f.readline()
    if header != b"NGSNextGen\x01\x00\x00\x00\x01\x00\x00\x00\n":
        warnings.warn(f"Unexpected Header {header}, Aramis parsing may not be valid")
    header = f.read(10)
    if header != b"DataMatrix":
        warnings.warn(f"Unexpected Header {header}, Aramis parsing may not be valid")
    # two length-prefixed strings: instrument description and dataset name
    instr = _readstr(f)
    iname = _readstr(f)
    # parse name
    if not name:
        name = iname
    # create data
    kwargs = {"name": name, "kind": "Aramis", "source": filestr}
    if parent is None:
        data = Data(**kwargs)
    else:
        data = parent.create_data(**kwargs)
    # array
    f.seek(4 * 4, 1)  # skip 4 integers [~=size, 0, 8, -1] is expected
    asize = struct.unpack("<i", f.read(4))[0]
    ndim = struct.unpack("<h", f.read(2))[0]
    shape = struct.unpack(f"<{'i'*ndim}", f.read(4 * ndim))
    f.seek(2 + 4, 1)  # skip '0xffff', size in bytes
    # payload: little-endian float32, row-major in the shape read above
    arr = np.fromfile(f, "<f4", np.prod(shape))
    arr.shape = shape
    # skip padding between the array block and the metadata that follows
    f.read((asize - arr.size) * 4)
    while f.read(1) == b"\0":
        pass
    f.seek(-1, 1)
    # counted lists of labels and units
    nlab = struct.unpack("<h", f.read(2))[0]
    labels = [_readstr(f) for _ in range(nlab)]
    nunit = struct.unpack("<h", f.read(2))[0]
    unit_trans = {"1/cm": "wn", "µm": "um", "sec": "s_t", "": None}
    units = [_readstr(f) for _ in range(nunit)]
    units = [unit_trans.get(u, u) for u in units]
    skip = struct.unpack("<h", f.read(2))[0]
    f.seek(skip, 1)  # skip values that were all zero in test data
    # Which index in the shape aligns with which label/unit
    nidx = struct.unpack("<h", f.read(2))[0]
    idx = struct.unpack(f"<{'h'*2*nidx}", f.read(4 * nidx))[::2]
    # the entry whose index equals ndim is the channel itself
    chidx = idx.index(ndim)
    data.create_channel(labels[chidx], arr, units=units[chidx])
    # Endpoints of axes, needed if full array unavailable
    nend = struct.unpack("<h", f.read(2))[0]
    end = struct.unpack(f"<{'f'*nend}", f.read(4 * nend))
    # Unknown what value means, other than nonzero seems to indicate array present
    nunk = struct.unpack("<h", f.read(2))[0]
    unk = struct.unpack(f"<{'i'*nunk}", f.read(4 * nunk))
    for i, u in enumerate(unk):
        if idx[i] < ndim:
            if u != 0:
                # explicit axis array stored in file (byte count / 4 = floats)
                axissize = struct.unpack("<h", f.read(2))[0] // 4
                arr = np.fromfile(f, "<f4", axissize)
            else:
                # no stored array: reconstruct linearly from the endpoints
                arr = np.linspace(end[2 * i], end[2 * i + 1], shape[idx[i]])
            # broadcastable shape: full length on its own dimension, 1 elsewhere
            sh = [1] * ndim
            sh[idx[i]] = shape[idx[i]]
            arr.shape = tuple(sh)
            data.create_variable(labels[i], arr, units=units[i], label=labels[i])
    data.transform(*[labels[i] for i, ix in enumerate(idx) if ix < ndim])
    # finish
    f.close()
    if verbose:
        print("data created at {0}".format(data.fullpath))
        print(" axes: {0}".format(data.axis_names))
        print(" shape: {0}".format(data.shape))
    return data
def _readstr(f):
return f.read(ord(f.read(1))).decode("iso-8859-1")
| {
"repo_name": "wright-group/WrightTools",
"path": "WrightTools/data/_aramis.py",
"copies": "1",
"size": "4452",
"license": "mit",
"hash": -1770452509731671600,
"line_mean": 32.9770992366,
"line_max": 99,
"alpha_frac": 0.5506627724,
"autogenerated": false,
"ratio": 3.359245283018868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.940555716299332,
"avg_score": 0.0008701784851095837,
"num_lines": 131
} |
"""A random agent for starcraft."""
import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
from keras.models import Model, load_model, save_model
# Index of the player_relative layer within the screen feature stack.
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
# Values appearing in the player_relative layer.
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3  # beacon/minerals
_PLAYER_HOSTILE = 4
# pysc2 action function ids used by this agent.
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
# Common action arguments (queue flag / select-all flag).
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_EPSILON_GREEDY = 1.0  # exploration vs exploitation criteria
class A3CTester(base_agent.BaseAgent):
    """A NN agent for starcraft.

    Loads a pre-trained Keras policy network and samples Move_screen
    targets from its output distribution over a 16x16 grid (upscaled
    to the 64x64 screen).
    """

    # set per-instance in __init__; class-level default kept for clarity
    model = None

    def __init__(self):
        super().__init__()
        self.model = load_model("learning_tools/learning_nn/pysc2-mineralshard-v0.knn")
        # Epsilon-greedy schedule, currently disabled (see `if True` in step):
        # try:
        #     with open("config", mode='r') as config:
        #         self.number_of_run = int(config.readline())
        #         self.epsilon_step = int(config.readline()) / 100.0
        #         self.epsilon = 0.0
        #         self.step_by_epsilon = 240 * self.number_of_run
        # except OSError:
        #     self.number_of_run = 10
        #     self.epsilon = _EPSILON_GREEDY
        #     self.epsilon_step = 0.0
        #     self.step_by_epsilon = -1

    def get_random_action(self, obs):
        """return an available random action
        -obs: the obs parameter given to the agent for the step call
        """
        number_of_possible_action = 1  # _NO_OP
        if _MOVE_SCREEN in obs.observation["available_actions"]:
            number_of_possible_action += 256  # one per 16x16 screen cell
        if _SELECT_ARMY in obs.observation["available_actions"]:
            number_of_possible_action += 1
        # get a random number to select an action (including _NO_OP)
        selected_action_id = numpy.random.randint(0, number_of_possible_action)
        if _MOVE_SCREEN in obs.observation["available_actions"] and selected_action_id < 256:
            return self.get_move_action(selected_action_id)
        else:
            # here two case: whether we have action id 256 or 257 or we have 0 or 1
            # in both case if _SELECT_ARMY is not available the following call handles it
            # NOTE(review): get_non_spacial_action is not defined in this class
            # and this branch is unreachable while step() uses `if True`; if
            # re-enabled it would raise AttributeError — confirm where that
            # helper is meant to live.
            return self.get_non_spacial_action(selected_action_id % 256)

    @staticmethod
    def get_move_action(linear_position):
        """return a pysc2 action and argument to do a move action at the pos given
        -linear_position : position of the move on a 16x16 grid, integer equal to y*16+x
        """
        x_16 = (linear_position // 16)
        y_16 = (linear_position % 16)
        # upscale the 16x16 policy grid to 64x64 screen coordinates
        x_true = min(x_16 * 4, 63)
        y_true = min(y_16 * 4, 63)
        # x and y are not in the right order, else it doesn't work...
        action_args = [_NOT_QUEUED, [y_true, x_true]]
        return _MOVE_SCREEN, action_args

    def step(self, obs):
        """Choose an action: select the army if needed, else query the model."""
        super().step(obs)
        if _MOVE_SCREEN not in obs.observation["available_actions"]:
            return actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
        if True:  # numpy.random.rand() < self.epsilon:
            # BUG FIX: `SCREEN` was an undefined name (NameError at runtime);
            # pysc2 exposes the screen feature layers under the "screen" key,
            # as sibling agents in this repo do.
            state = [obs.observation["screen"][features.SCREEN_FEATURES.player_relative.index],
                     obs.observation["screen"][features.SCREEN_FEATURES.selected.index]]
            # Stack the two 64x64 layers into a (1, 64, 64, 2) float batch.
            # Replaces the previous element-by-element Python copy loops.
            formatted_state = numpy.stack(state, axis=-1)[numpy.newaxis].astype(float)
            # get reward prediction from neural network
            action = self.model.predict(formatted_state, batch_size=1)
            # action_num = numpy.argmax(action[0][0])
            action_num = numpy.random.choice(256, p=action[0][0])
            selected_action, action_args = self.get_move_action(action_num)
        else:
            selected_action, action_args = self.get_random_action(obs)
        return actions.FunctionCall(selected_action, action_args)

    def reset(self):
        """Log the episode reward, then zero it for the next episode."""
        super().reset()
        print("reward for this game:", self.reward, "(", self.steps, "steps)")
        self.reward = 0
        # epsilon bookkeeping, disabled together with the schedule above:
        # if self.steps == self.step_by_epsilon:
        #     with open("reward.csv", mode='a') as out_file:
        #         out_file.write(str(self.epsilon) + ", " + str(self.reward * 240.0 / self.steps) + "\n")
        #     self.epsilon += self.epsilon_step
        #     self.reward = 0
        #     self.steps = 0
| {
"repo_name": "Xaxetrov/OSCAR",
"path": "learning_tools/A3C_test/a3c_tester.py",
"copies": "1",
"size": "4688",
"license": "apache-2.0",
"hash": -7636944543635444000,
"line_mean": 41.2342342342,
"line_max": 107,
"alpha_frac": 0.6092150171,
"autogenerated": false,
"ratio": 3.543461829176115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4652676846276115,
"avg_score": null,
"num_lines": null
} |
"""A random agent for starcraft."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import actions as sc2_actions
from pysc2.lib import features
from pysc2.lib import actions
from chris_example.defeat_zerglings import common
import numpy as np
# Indexes of screen feature layers used by this agent.
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
_SELECTED = features.SCREEN_FEATURES.selected.index
# Values appearing in the player_relative layer.
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3  # beacon/minerals
_PLAYER_HOSTILE = 4
# pysc2 action function ids and related argument constants.
_NO_OP = actions.FUNCTIONS.no_op.id
_SELECT_UNIT_ID = 1
_CONTROL_GROUP_SET = 1
_CONTROL_GROUP_RECALL = 0
_SELECT_CONTROL_GROUP = actions.FUNCTIONS.select_control_group.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_UNIT = actions.FUNCTIONS.select_unit.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
# Common action arguments (queue flag / select-all flag).
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
class MarineAgent(base_agent.BaseAgent):
    """A random agent for starcraft."""
    # class attribute shared across instances; not referenced in this class
    demo_replay = []

    def __init__(self, env):
        super(MarineAgent, self).__init__()
        self.env = env

    def step(self, obs):
        """Pick the next action for the currently selected marine.

        Policy: run away (action 2) when an enemy is within 7 px; randomly
        spread or attack when a friendly is within 3 px; otherwise attack (1).
        """
        super(MarineAgent, self).step(obs)
        # 1. Select marine!
        obs, screen, player = common.select_marine(self.env, [obs])
        player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
        enemy_y, enemy_x = (player_relative == _PLAYER_HOSTILE).nonzero()
        # 2. Run away from nearby enemy
        closest, min_dist = None, None
        if (len(player) == 2):
            for p in zip(enemy_x, enemy_y):
                dist = np.linalg.norm(np.array(player) - np.array(p))
                # NOTE(review): `not min_dist` is also true when the current
                # minimum is exactly 0, so a zero distance keeps getting
                # overwritten; `min_dist is None` was probably intended.
                # Confirm before changing — the friendly scan below includes
                # the marine's own pixel, so fixing it there would make the
                # "sparse" branch fire every step.
                if not min_dist or dist < min_dist:
                    closest, min_dist = p, dist
        # 3. Sparse!
        friendly_y, friendly_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
        closest_friend, min_dist_friend = None, None
        if (len(player) == 2):
            for p in zip(friendly_x, friendly_y):
                dist = np.linalg.norm(np.array(player) - np.array(p))
                if not min_dist_friend or dist < min_dist_friend:
                    closest_friend, min_dist_friend = p, dist
        # Thresholds are in screen pixels. (`!= None` would be `is not None`
        # under PEP 8; kept byte-identical here.)
        if (min_dist != None and min_dist <= 7):
            obs, new_action = common.marine_action(self.env, obs, player, 2)
        elif (min_dist_friend != None and min_dist_friend <= 3):
            sparse_or_attack = np.random.randint(0, 2)
            obs, new_action = common.marine_action(self.env, obs, player, sparse_or_attack)
        else:
            obs, new_action = common.marine_action(self.env, obs, player, 1)
        return new_action[0]
| {
"repo_name": "pangzhenjia/pysc2-source",
"path": "chris_example/defeat_zerglings/demo_agent.py",
"copies": "1",
"size": "2859",
"license": "apache-2.0",
"hash": -1335931962972192500,
"line_mean": 29.0947368421,
"line_max": 91,
"alpha_frac": 0.6411332634,
"autogenerated": false,
"ratio": 3.2562642369020502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.439739750030205,
"avg_score": null,
"num_lines": null
} |
"""A random collection of methods used in openprescribing, in need of
refactoring, tests, and upgrading.
"""
from os import environ
import csv
import datetime
import json
import logging
import psycopg2
import re
import shutil
import subprocess
import tempfile
import time
import uuid
from google.cloud import bigquery
from google.cloud import storage
from google.cloud.bigquery import SchemaField
from google.cloud.exceptions import NotFound
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# Module-level logger for this helper collection.
logger = logging.getLogger(__name__)
# Schema for dm+d (dictionary of medicines and devices) product data.
DMD_SCHEMA = [
    SchemaField('dmdid', 'STRING'),
    SchemaField('bnf_code', 'STRING'),
    SchemaField('vpid', 'STRING'),
    SchemaField('display_name', 'STRING'),
    SchemaField('ema', 'STRING'),
    SchemaField('pres_statcd', 'STRING'),
    SchemaField('avail_restrictcd', 'STRING'),
    SchemaField('product_type', 'STRING'),
    SchemaField('non_availcd', 'STRING'),
    SchemaField('concept_class', 'STRING'),
    SchemaField('nurse_f', 'STRING'),
    SchemaField('dent_f', 'STRING'),
    SchemaField('prod_order_no', 'STRING'),
    SchemaField('sched_1', 'STRING'),
    SchemaField('sched_2', 'STRING'),
    SchemaField('padm', 'STRING'),
    SchemaField('fp10_mda', 'STRING'),
    SchemaField('acbs', 'STRING'),
    SchemaField('assort_flav', 'STRING'),
    SchemaField('catcd', 'STRING'),
    SchemaField('tariff_category', 'STRING'),
    SchemaField('flag_imported', 'STRING'),
    SchemaField('flag_broken_bulk', 'STRING'),
    SchemaField('flag_non_bioequivalence', 'STRING'),
    SchemaField('flag_special_containers', 'BOOLEAN')
]
# Schema for CCG (clinical commissioning group) organisations;
# loaded from the Postgres frontend_pct table (see load_ccgs_from_pg).
CCG_SCHEMA = [
    SchemaField('code', 'STRING'),
    SchemaField('name', 'STRING'),
    SchemaField('ons_code', 'STRING'),
    SchemaField('org_type', 'STRING'),
    SchemaField('open_date', 'TIMESTAMP'),
    SchemaField('close_date', 'TIMESTAMP'),
    SchemaField('address', 'STRING'),
    SchemaField('postcode', 'STRING'),
]
# Schema for prescribing rows produced by the import_hscic_prescribing
# management command (see prescribing_transform).
PRESCRIBING_SCHEMA = [
    SchemaField('sha', 'STRING'),
    SchemaField('pct', 'STRING'),
    SchemaField('practice', 'STRING'),
    SchemaField('bnf_code', 'STRING'),
    SchemaField('bnf_name', 'STRING'),
    SchemaField('items', 'INTEGER'),
    SchemaField('net_cost', 'FLOAT'),
    SchemaField('actual_cost', 'FLOAT'),
    SchemaField('quantity', 'INTEGER'),
    SchemaField('month', 'TIMESTAMP'),
]
# Schema for BNF presentations (see presentation_transform).
PRESENTATION_SCHEMA = [
    SchemaField('bnf_code', 'STRING'),
    SchemaField('name', 'STRING'),
    SchemaField('is_generic', 'BOOLEAN'),
    SchemaField('active_quantity', 'FLOAT'),
    SchemaField('adq', 'FLOAT'),
    SchemaField('adq_unit', 'STRING'),
    SchemaField('percent_of_adq', 'FLOAT'),
]
# Schema for GP practices; note dates are kept as STRING here.
PRACTICE_SCHEMA = [
    SchemaField('code', 'STRING'),
    SchemaField('name', 'STRING'),
    SchemaField('address1', 'STRING'),
    SchemaField('address2', 'STRING'),
    SchemaField('address3', 'STRING'),
    SchemaField('address4', 'STRING'),
    SchemaField('address5', 'STRING'),
    SchemaField('postcode', 'STRING'),
    SchemaField('location', 'STRING'),
    SchemaField('ccg_id', 'STRING'),
    SchemaField('setting', 'INTEGER'),
    SchemaField('close_date', 'STRING'),
    SchemaField('join_provider_date', 'STRING'),
    SchemaField('leave_provider_date', 'STRING'),
    SchemaField('open_date', 'STRING'),
    SchemaField('status_code', 'STRING'),
]
# Schema for per-practice list-size statistics; the first and last
# columns map to different Postgres names (see load_statistics_from_pg).
PRACTICE_STATISTICS_SCHEMA = [
    SchemaField('month', 'TIMESTAMP'),
    SchemaField('male_0_4', 'INTEGER'),
    SchemaField('female_0_4', 'INTEGER'),
    SchemaField('male_5_14', 'INTEGER'),
    SchemaField('male_15_24', 'INTEGER'),
    SchemaField('male_25_34', 'INTEGER'),
    SchemaField('male_35_44', 'INTEGER'),
    SchemaField('male_45_54', 'INTEGER'),
    SchemaField('male_55_64', 'INTEGER'),
    SchemaField('male_65_74', 'INTEGER'),
    SchemaField('male_75_plus', 'INTEGER'),
    SchemaField('female_5_14', 'INTEGER'),
    SchemaField('female_15_24', 'INTEGER'),
    SchemaField('female_25_34', 'INTEGER'),
    SchemaField('female_35_44', 'INTEGER'),
    SchemaField('female_45_54', 'INTEGER'),
    SchemaField('female_55_64', 'INTEGER'),
    SchemaField('female_65_74', 'INTEGER'),
    SchemaField('female_75_plus', 'INTEGER'),
    SchemaField('total_list_size', 'INTEGER'),
    SchemaField('astro_pu_cost', 'FLOAT'),
    SchemaField('astro_pu_items', 'FLOAT'),
    SchemaField('star_pu', 'STRING'),
    SchemaField('pct_id', 'STRING'),
    SchemaField('practice', 'STRING')
]
def get_env_setting(setting, default=None):
    """Return the value of environment variable `setting`.

    Falls back to `default` when the variable is unset; raises when no
    default was supplied. (Python 2 `StandardError` retained so existing
    callers catching it keep working.)
    """
    try:
        return environ[setting]
    except KeyError:
        # BUG FIX: previously tested `if default:`, so falsy-but-valid
        # defaults ('', 0, False) raised instead of being returned.
        if default is not None:
            return default
        error_msg = "Set the %s env variable" % setting
        raise StandardError(error_msg)
def get_bq_service():
    """Return a low-level BigQuery v2 REST service endpoint.

    NOTE: newer code should migrate to the google-cloud library once it
    settles down a bit; see
    https://googlecloudplatform.github.io/google-cloud-python/
    """
    default_credentials = GoogleCredentials.get_application_default()
    return discovery.build(
        'bigquery', 'v2', credentials=default_credentials)
def load_data_from_file(
        dataset_name, table_name,
        source_file_name, schema, _transform=None):
    """Given a CSV of data, load it into BigQuery using the specified
    schema, with an optional function to transform each row before
    loading.

    Returns the completed load job. On failure the uploaded CSV is
    copied to /tmp/error.csv and its first/last lines are appended to
    the exception message for post-mortem inspection.
    """
    # We use the new-style bigquery library here
    client = bigquery.Client(project='ebmdatalab')
    dataset = client.dataset(dataset_name)
    table = dataset.table(
        table_name,
        schema=schema)
    if not table.exists():
        table.create()
    table.reload()
    with tempfile.NamedTemporaryFile(mode='rb+') as csv_file:
        # Re-write the source CSV (optionally transformed row by row)
        # into a temp file which is then streamed to BigQuery.
        with open(source_file_name, 'rb') as source_file:
            writer = csv.writer(csv_file)
            reader = csv.reader(source_file)
            for row in reader:
                if _transform:
                    row = _transform(row)
                writer.writerow(row)
            job = table.upload_from_file(
                csv_file, source_format='text/csv',
                create_disposition="CREATE_IF_NEEDED",
                write_disposition="WRITE_TRUNCATE",
                rewind=True)
            try:
                wait_for_job(job)
            except Exception as e:
                shutil.copyfile(csv_file.name, "/tmp/error.csv")
                extra_info = '. Failed CSV has been copied to /tmp/error.csv; '
                extra_info += "first and last lines:\n"
                extra_info += subprocess.check_output(
                    'head -1 /tmp/error.csv', shell=True)
                extra_info += subprocess.check_output(
                    'tail -1 /tmp/error.csv', shell=True)
                # BUG FIX: was `e.args = (...,), e.args[1:]`, which made
                # args a 2-tuple of tuples instead of prepending our
                # message to the existing args.
                e.args = (str(e.args[0]) + extra_info,) + e.args[1:]
                raise
    return job
def prescribing_transform(row):
    """Transform a row from a formatted file into data suitable for
    storing in our bigquery schema.

    A 'formatted file' is a file created by the
    import_hscic_prescribing Django management command.
    """
    # Re-encode the period column as a BigQuery TIMESTAMP, then drop the
    # two columns our prescribing schema does not include.
    row[10] = row[10] + " 00:00:00"
    del row[3]
    del row[-1]
    return row
def statistics_transform(row):
    """Transform a row from the frontend_practicestatistics table so it
    matches our statistics schema.
    """
    # BigQuery TIMESTAMP format requires an explicit time component.
    row[0] = "{0} 00:00:00".format(row[0])
    return row
def presentation_transform(row):
    """Transform a row from the frontend_presentation table so it
    matches our presentation schema.
    """
    # Postgres CSV export encodes booleans as 't'/'f'; BigQuery wants
    # 'true'/'false'.
    row[2] = 'true' if row[2] == 't' else 'false'
    return row
def load_prescribing_data_from_file(
        dataset_name, table_name, source_file_name):
    """Load a formatted file of prescribing data into BigQuery."""
    return load_data_from_file(
        dataset_name,
        table_name,
        source_file_name,
        PRESCRIBING_SCHEMA,
        _transform=prescribing_transform)
def load_statistics_from_pg(dataset='hscic'):
    """Load the frontend_practicestatistics table from the
    openprescribing application into BigQuery.
    """
    schema = PRACTICE_STATISTICS_SCHEMA
    # The Postgres column names differ from the BigQuery schema in two
    # places: the date column and the practice foreign key.
    pg_cols = [field.name for field in schema]
    pg_cols[0] = 'date'
    pg_cols[-1] = 'practice_id'
    load_data_from_pg(
        dataset, 'practice_statistics', 'frontend_practicestatistics',
        schema, cols=pg_cols, _transform=statistics_transform)
def load_presentation_from_pg(dataset='hscic'):
    """Load the frontend_presentation table from the openprescribing
    application into BigQuery.
    """
    load_data_from_pg(
        dataset,
        'presentation',
        'frontend_presentation',
        PRESENTATION_SCHEMA,
        _transform=presentation_transform)
def load_ccgs_from_pg(dataset='hscic'):
    """Load the frontend_pct (CCGs) table from the openprescribing
    application into BigQuery.
    """
    def transform(row):
        # Re-encode open_date (4) and close_date (5), when present, as
        # BigQuery TIMESTAMPs.
        for i in (4, 5):
            if row[i]:
                row[i] = "%s 00:00:00" % row[i]
        return row
    load_data_from_pg(
        dataset, 'ccgs', 'frontend_pct',
        CCG_SCHEMA, cols=[field.name for field in CCG_SCHEMA],
        _transform=transform)
def load_data_from_pg(dataset_name, bq_table_name,
                      pg_table_name, schema, cols=None, _transform=None):
    """Loads every row currently in named postgres table to a
    specified table (with schema) in BigQuery

    Connection settings come from DB_NAME/DB_USER/DB_PASS/DB_HOST
    environment variables. `cols` optionally names the Postgres columns
    to dump (defaults to the schema's field names); `_transform` is
    applied per row by load_data_from_file.
    """
    db_name = get_env_setting('DB_NAME')
    db_user = get_env_setting('DB_USER')
    db_pass = get_env_setting('DB_PASS')
    db_host = get_env_setting('DB_HOST', '127.0.0.1')
    conn = psycopg2.connect(database=db_name, user=db_user,
                            password=db_pass, host=db_host)
    with tempfile.NamedTemporaryFile(mode='r+b') as csv_file:
        if not cols:
            cols = [x.name for x in schema]
        # Dump the table as CSV (empty string for NULL) straight into
        # the temp file, then hand that file to the BigQuery loader.
        sql = "COPY %s(%s) TO STDOUT (FORMAT CSV, NULL '')" % (
            pg_table_name, ",".join(cols))
        conn.cursor().copy_expert(
            sql, csv_file)
        csv_file.seek(0)
        load_data_from_file(
            dataset_name, bq_table_name,
            csv_file.name,
            schema,
            _transform
        )
        conn.commit()
        conn.close()
def wait_for_job(job):
    """Start `job` if necessary, then poll (up to ~1000s) until it is
    DONE. Asserts the job finished without errors and returns it.
    """
    if job.state != 'RUNNING':
        job.begin()
    for _ in range(1000):
        if job.state == 'DONE':
            break
        time.sleep(1)
        job.reload()
    assert not job.errors, job.errors
    return job
def download_from_gcs(gcs_uri, target_path):
    """Download every blob matching `gcs_uri` (a gs:// URI, optionally
    containing a `*` wildcard) and gunzip-concatenate them into
    `target_path`.

    Returns the path of the unzipped file.
    """
    bucket, blob_name = gcs_uri.replace('gs://', '').split('/', 1)
    # NOTE(review): project id 'embdatalab' looks like a typo for
    # 'ebmdatalab' (used elsewhere in this module) — confirm before
    # changing, in case it really is a distinct project.
    client = storage.Client(project='embdatalab')
    bucket = client.get_bucket(bucket)
    prefix = blob_name.split('*')[0]
    cmd = "gunzip -c -f %s >> %s"
    # BUG FIX: the output handle was previously opened and never closed;
    # a context manager guarantees it is flushed/closed before callers
    # read the result.
    with open(target_path, 'w') as unzipped:
        for blob in bucket.list_blobs(prefix=prefix):
            with tempfile.NamedTemporaryFile(mode='rb+') as f:
                logger.info("Downloading %s to %s" % (blob.path, f.name))
                blob.chunk_size = 2 ** 30  # large chunks for big exports
                blob.download_to_file(f)
                f.flush()
                f.seek(0)
                # append-decompress each blob onto the target file
                subprocess.check_call(
                    cmd % (f.name, unzipped.name), shell=True)
    return target_path
def delete_from_gcs(gcs_uri):
    """Delete every blob matching `gcs_uri` (wildcard-aware); a missing
    bucket is silently ignored.
    """
    bucket_name, blob_name = gcs_uri.replace('gs://', '').split('/', 1)
    client = storage.Client(project='embdatalab')
    prefix = blob_name.split('*')[0]
    try:
        for blob in client.get_bucket(bucket_name).list_blobs(prefix=prefix):
            blob.delete()
    except NotFound:
        pass
def copy_table_to_gcs(table, gcs_uri):
    """Export `table` to `gcs_uri` as headerless gzipped CSV, replacing
    anything already stored there.
    """
    delete_from_gcs(gcs_uri)
    client = bigquery.client.Client(project='ebmdatalab')
    job_name = "extract-formatted-table-job-%s" % uuid.uuid1()
    extract_job = client.extract_table_to_storage(job_name, table, gcs_uri)
    extract_job.destination_format = 'CSV'
    extract_job.compression = 'GZIP'
    extract_job.print_header = False
    wait_for_job(extract_job)
def query_and_return(project_id, dataset_id, table_id, query, legacy=False):
    """Send query to BigQuery, wait, write it to table_id, and return
    response object when the job has completed.

    The returned response dict gains an `openp` key holding the query,
    elapsed time, GB processed and estimated cost.
    """
    if not legacy:
        # Rename any legacy-style table references to use standard
        # SQL dialect. Because we use a mixture of both, we
        # standardise on only using the legacy style for the time
        # being.
        query = re.sub(r'\[(.+?):(.+?)\.(.+?)\]', r'\1.\2.\3', query)
    payload = {
        "configuration": {
            "query": {
                "query": query,
                # BUG FIX: this key was misspelled "flattenResuts", so
                # the setting never reached the API.
                "flattenResults": False,
                "allowLargeResults": True,
                "timeoutMs": 100000,
                "useQueryCache": True,
                "useLegacySql": legacy,
                "destinationTable": {
                    "projectId": project_id,
                    "tableId": table_id,
                    "datasetId": dataset_id
                },
                "createDisposition": "CREATE_IF_NEEDED",
                "writeDisposition": "WRITE_TRUNCATE"
            }
        }
    }
    # We've started using the google-cloud library since first
    # writing this. TODO: decide if we can use that throughout
    bq = get_bq_service()
    logger.info("Writing to bigquery table %s" % table_id)
    start = datetime.datetime.now()
    response = bq.jobs().insert(
        projectId=project_id,
        body=payload).execute()
    counter = 0
    job_id = response['jobReference']['jobId']
    while True:
        time.sleep(1)
        response = bq.jobs().get(
            projectId=project_id,
            jobId=job_id).execute()
        counter += 1
        if response['status']['state'] == 'DONE':
            if 'errors' in response['status']:
                query = str(response['configuration']['query']['query'])
                for i, l in enumerate(query.split("\n")):
                    # print SQL query with line numbers for debugging
                    # (parenthesised so this also parses under Python 3)
                    print("{:>3}: {}".format(i + 1, l))
                raise StandardError(
                    json.dumps(response['status']['errors'], indent=2))
            else:
                break
    bytes_billed = float(
        response['statistics']['query']['totalBytesBilled'])
    gb_processed = round(bytes_billed / 1024 / 1024 / 1024, 2)
    # $5 per TB billed (BigQuery on-demand pricing at time of writing)
    est_cost = round(bytes_billed / 1e+12 * 5.0, 2)
    # Add our own metadata
    elapsed = (datetime.datetime.now() - start).total_seconds()
    response['openp'] = {'query': query,
                        'est_cost': est_cost,
                        'time': elapsed,
                        'gb_processed': gb_processed}
    logger.info("Time %ss, cost $%s" % (elapsed, est_cost))
    return response
def get_rows(project_id, dataset_id, table_name, max_results=None):
    """Iterate over the specified bigquery table, yielding a dict for
    each row of data.
    """
    client = bigquery.Client(project=project_id)
    dataset = client.dataset(dataset_id)
    table = dataset.table(table_name)
    table.reload()
    fields = [x.name for x in table.schema]
    result = table.fetch_data(max_results=max_results)
    token = result.next_page_token
    while True:
        for row in result:
            yield _row_to_dict(row, fields)
        if token is None:
            break
        # fetch the next page and continue
        result = table.fetch_data(page_token=token, max_results=max_results)
        token = result.next_page_token
    # BUG FIX: this generator previously ended with `raise StopIteration`,
    # which PEP 479 converts to RuntimeError on Python 3.7+; falling off
    # the end (implicit return) is the correct way to finish.
def _row_to_dict(row, fields):
"""Convert a row from bigquery into a dictionary, and convert NaN to
None
"""
dict_row = {}
for i, value in enumerate(row):
key = fields[i]
if value and str(value).lower() == 'nan':
value = None
dict_row[key] = value
return dict_row
| {
"repo_name": "ebmdatalab/ebmdatalab-python",
"path": "ebmdatalab/bigquery.py",
"copies": "1",
"size": "16167",
"license": "mit",
"hash": 4120428029813539300,
"line_mean": 31.5291750503,
"line_max": 79,
"alpha_frac": 0.6047504175,
"autogenerated": false,
"ratio": 3.71484375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48195941675,
"avg_score": null,
"num_lines": null
} |
"""ArangoDB api."""
from functools import wraps, partial
from itertools import chain
from six.moves import map
from six import iteritems
import requests
import requests.adapters
from . import exc
import logging
# Module-level logger used by json_result for API error reporting.
LOG = logging.getLogger(__name__)
def json_result():
    """Decorate an arango response call to extract json and perform error handling."""
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            response = func(*args, **kwargs)
            headers = dict(response.headers.lower_items())
            content_type = headers.get('content-type', '')
            if content_type.startswith('application/json'):
                payload = response.json()
                # inspect the payload for an API-level error flag
                if "error" in payload and payload.get('error', False):
                    # create a polymorphic exception just by errorNum
                    raise exc.ApiError(
                        code=payload.get('code'),
                        num=payload.get('errorNum'),
                        message=payload.get('errorMessage'),
                        func=func,
                        args=args,
                        kwargs=kwargs,
                    )
                # no error
                return payload
            if response.status_code == 401:
                raise exc.Unauthorized(response)
            ex = exc.ContentTypeException("No json content-type", response)
            LOG.error("Error while reading `%s` from API: %s", response.url, ex)
            raise ex
        return wrapped
    return decorator
class Client(object):
    """A client for arangodb server.

    Holds a requests session (pooled by default) and exposes the
    database-scoped API sections as attributes.
    """
    def __init__(self, database, endpoint="http://localhost:8529", session=None, auth=None):
        # default database
        self.database = database
        self.endpoint = endpoint.rstrip("/")
        self.auth = auth
        # may use an external session
        if session is None:
            # enlarged connection pool for concurrent use
            adapter = requests.adapters.HTTPAdapter(
                pool_maxsize=100,
                pool_connections=100
            )
            session = requests.Session()
            session.mount(self.endpoint, adapter)
        self.session = session
        # arango specific api, each bound to its URL prefix via ApiProxy
        self.collections = Collections(self.api(self.database, 'collection'))
        self.documents = Documents(self.api(self.database, 'document'))
        self.edges = Edges(self.api(self.database, 'edge'))
        self.cursors = Cursors(self.api(self.database, 'cursor'))
        self.graphs = Graphs(self.api(self.database, 'gharial'))
        self.indexes = Indexes(self.api(self.database, 'index'))
        self.queries = Queries(self.api(self.database, 'query'))
    def url(self, *path):
        """Return a full url to the arangodb server."""
        return '/'.join(map(str, chain((self.endpoint, ), path)))
    # The HTTP verbs below defer to the shared session; json_result
    # unwraps the JSON body and raises ApiError & friends on failure.
    @json_result()
    def get(self, *path, **kwargs):
        return self.session.get(self.url(*path), **kwargs)
    @json_result()
    def post(self, *path, **kwargs):
        return self.session.post(self.url(*path), **kwargs)
    @json_result()
    def put(self, *path, **kwargs):
        return self.session.put(self.url(*path), **kwargs)
    @json_result()
    def patch(self, *path, **kwargs):
        return self.session.patch(self.url(*path), **kwargs)
    def head(self, *path, **kwargs):
        # HEAD responses carry no body, so no json_result unwrapping
        return self.session.head(self.url(*path), **kwargs)
    @json_result()
    def delete(self, *path, **kwargs):
        return self.session.delete(self.url(*path), **kwargs)
    def api(self, database, *path, **kwargs):
        """Just expose the HTTP methods to this session, by partially pre binding the path."""
        if database is None:
            # no database prefix: requests target the server default db
            prefix = ('_api', )
        else:
            prefix = ('_db', database, '_api')
        return ApiProxy(self, *chain(prefix, path), auth=self.auth, **kwargs)
class SystemClient(Client):
    """A client bound to an arangodb server's system database."""

    def __init__(self, endpoint="http://localhost:8529", session=None, auth=None):
        # database=None makes requests target the server default (_system)
        super(SystemClient, self).__init__(
            None, endpoint=endpoint, session=session, auth=auth)
        # the database management api is only allowed for the system database
        self.databases = Databases(self.api(None, 'database'))

    def create_database(self, database, user=None):
        """Create `database` if it does not already exist, optionally
        granting `user` access."""
        if database in self.databases.databases:
            return
        if user:
            self.databases.create(database, user)
        else:
            self.databases.create(database)
class ApiProxy(object):
    """A Proxy to the session, partially preselect parts of the url and request parameter."""

    def __init__(self, session, *path, **kwargs):
        # expose each HTTP verb of the wrapped session with the URL path
        # segments (and any default kwargs) already bound
        for verb in ('get', 'post', 'put', 'patch', 'delete', 'head'):
            bound = partial(getattr(session, verb), *path, **kwargs)
            setattr(self, verb, bound)
class Api(object):
    """Base for the concrete API sections: stores the bound ApiProxy."""
    def __init__(self, api_proxy):
        # proxy with the section's URL prefix already bound
        self.api = api_proxy
class Databases(Api):
    """Database management endpoints (system database only)."""

    @property
    def databases(self):
        """List the databases accessible to the current user."""
        return self.api.get('user')['result']

    def create(self, name, *users):
        """Create a database, optionally with initial users."""
        body = {'name': name}
        if users:
            # TODO validation of users
            body['users'] = users
        return self.api.post(json=body).get('result', False)

    def drop(self, name):
        """Drop a database."""
        return self.api.delete(name).get('result', False)
# ArangoDB collection "type" codes (2 = document, 3 = edge, per the
# values used by the collection API) — confirm against server docs.
DOCUMENT_COLLECTION = 2
EDGE_COLLECTION = 3
class Collections(Api):
    """Collection management endpoints."""

    def create(self, name, **kwargs):
        """Create a collection called `name`; kwargs pass through to the API."""
        return self.api.post(json=dict(name=name, **kwargs))

    def get(self, *name, **kwargs):
        """Fetch one collection (by name) or all collections.

        Returns None when the collection does not exist. `no_system`
        maps to the API's excludeSystem parameter.
        """
        params = {}
        if 'no_system' in kwargs:
            params['excludeSystem'] = kwargs['no_system']
        try:
            return self.api.get(*name, params=params)
        except exc.CollectionNotFound:
            return None
class DocumentsMixin(object):
    """Shared CRUD operations for document and edge endpoints."""

    def create(self, collection, doc, createCollection=None):
        """Store `doc` as a new document in `collection`."""
        params = {'collection': collection}
        if createCollection is not None:
            params['createCollection'] = createCollection
        return self.api.post(json=doc, params=params)

    def get(self, *handle, **kwargs):
        """Fetch a document by handle, or list a collection's documents.

        :param handle: the document handle or the collection name
        """
        if len(handle) == 1 and '/' not in handle[0]:
            # a bare collection name: list that collection instead
            listing = {'collection': handle[0],
                       'type': kwargs.get('type', 'id')}
            return self.api.get(params=listing)
        return self.api.get(*handle, params={})

    def delete(self, *handle):
        """Delete the document addressed by `handle`."""
        return self.api.delete(*handle)

    def replace(self, doc, *handle, **kwargs):
        """Replace the document addressed by `handle` with `doc`."""
        return self.api.put(*handle, json=doc)

    def update(self, doc, *handle, **kwargs):
        """Partially update the document addressed by `handle`.

        `keep` controls keepNull; `merge` controls mergeObjects.
        """
        params = {'keepNull': kwargs.get('keep', False),
                  'mergeObjects': kwargs.get('merge', True)}
        return self.api.patch(*handle, json=doc, params=params)
class Documents(Api, DocumentsMixin):
    """Document CRUD bound to the document endpoint via DocumentsMixin."""
    pass
class Edges(Api, DocumentsMixin):
    """Edge stuff."""

    def create(self, collection, _from, _to, edge):
        """Store `edge` connecting `_from` -> `_to` in `collection`."""
        query = {
            'from': str(_from),
            'to': str(_to),
            'collection': collection,
        }
        return self.api.post(json=edge, params=query)
class Graphs(Api):
    """Graph management (gharial) endpoints."""

    def get(self, *name):
        """Return one graph description, or all graphs when unnamed."""
        result = self.api.get(*name)
        key = 'graph' if name else 'graphs'
        return result[key]

    def create(self, definition):
        """Create a graph from a gharial definition document."""
        return self.api.post(json=definition)

    def drop(self, name):
        """Delete the named graph."""
        return self.api.delete(name)

    def vertex(self, name):
        """List the vertex collections of the named graph."""
        return self.api.get(name, "vertex")['collections']
def remap_fields(dct, *include, **mapping):
    """Remap certain fields of a dict by yielding (key, value).

    When `include` names are given, other keys are dropped; `mapping`
    renames keys on the way out.
    """
    for key, value in iteritems(dct):
        # an explicit include-list guards against unexpected keys
        if include and key not in include:
            continue
        yield mapping.get(key, key), value
class Cursors(Api):
    """
    see https://github.com/arangodb/arangodb/issues/1285
    no underscore in query bind var
    """

    def create(self, query, bind=None, **kwargs):
        """Create a cursor for `query` with optional bind variables.

        https://docs.arangodb.com/HttpAqlQueryCursor/AccessingCursors.html
        Optional kwargs: batch (mapped to batchSize), ttl, count.
        """
        body = {
            'query': query,
            'bindVars': bind if bind is not None else {},
        }
        # optional fields, renaming `batch` to the API's `batchSize`
        body.update(
            remap_fields(kwargs, 'batch', 'ttl', 'count', batch='batchSize'))
        return self.api.post(json=body)

    def pursue(self, cursor_id):
        """Just continue to load a batch from a previous call."""
        return self.api.put(cursor_id)

    def delete(self, name):
        """Dispose of a cursor."""
        return self.api.delete(name)
class Indexes(Api):
    """Index management endpoint."""

    def get(self, *handle, **kwargs):
        """Get one index by handle, or all indexes of a collection.

        :param handle: the index handle or the collection name
        """
        params = {}
        if len(handle) == 1 and '/' not in handle[0]:
            # A bare collection name: list that collection's indexes.
            params = dict(collection=handle[0])
            params['type'] = kwargs.get('type', 'id')
            handle = ()
        return self.api.get(*handle, params=params)

    def create(self, collection, index_type, fields=None, **kwargs):
        """Create an index of *index_type* over *fields* in *collection*."""
        params = {'collection': collection}
        doc = {
            "type": index_type,
            # `fields or []` replaces the obscure `X is not None and X or Y`
            # idiom; both yield [] for None and for an empty list.
            "fields": fields or [],
        }
        doc.update(kwargs)
        return self.api.post(json=doc, params=params)
class Queries(Api):
    """AQL query utility endpoint."""

    def parse(self, query):
        """Parse a query and validate it by the server."""
        body = {'query': query}
        return self.api.post(json=body)
| {
"repo_name": "diefans/python-arangodb",
"path": "src/arangodb/api.py",
"copies": "1",
"size": "10483",
"license": "apache-2.0",
"hash": 7609258695579025000,
"line_mean": 24.8839506173,
"line_max": 95,
"alpha_frac": 0.5670132596,
"autogenerated": false,
"ratio": 4.169848846459825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5236862106059825,
"avg_score": null,
"num_lines": null
} |
"""araplus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
# from django.contrib import admin
# Routes for the "grill" app.
# NOTE(review): views are given as dotted-path strings, which Django
# accepted up to 1.9 (this project targets 1.8 per the header docstring);
# Django >= 1.10 requires callables -- confirm before upgrading.
urlpatterns = [
    url(r'^$', 'apps.grill.views.home'),
    url(r'^(?P<grill_id>\d+)/refresh_comment/$',
        'apps.grill.views.refresh_comment', name='refresh_comment'),
    url(r'^add_grill/',
        'apps.grill.views.add_grill', name='add_grill'),
    url(r'^(?P<grill_id>\d+)$',
        'apps.grill.views.view_grill', name='view_grill'),
    url(r'^(?P<grill_id>\d+)/add_comment/$',
        'apps.grill.views.add_comment', name='add_comment'),
    url(r'^(?P<grill_id>\d+)/vote_comment/$',
        'apps.grill.views.vote_comment', name='vote_comment'),
]
| {
"repo_name": "sparcs-kaist/araplus",
"path": "apps/grill/urls.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": -2170863569932669200,
"line_mean": 39.2903225806,
"line_max": 77,
"alpha_frac": 0.646917534,
"autogenerated": false,
"ratio": 3.130325814536341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4277243348536341,
"avg_score": null,
"num_lines": null
} |
# Auto-generated waf configuration cache for the Pebble SDK 'aplite'
# platform build.
# NOTE(review): machine-written by the SDK's configure step -- do not
# edit by hand; values are read back verbatim by the waf build system.
AR = 'arm-none-eabi-ar'
ARFLAGS = 'rcs'
AS = 'arm-none-eabi-gcc'
BINDIR = '/usr/local/bin'
BLOCK_MESSAGE_KEYS = []
BUILD_DIR = 'aplite'
BUILD_TYPE = 'app'
BUNDLE_BIN_DIR = 'aplite'
BUNDLE_NAME = 'ActivityLogV2.pbw'
CC = ['arm-none-eabi-gcc']
CCLNK_SRC_F = []
CCLNK_TGT_F = ['-o']
CC_NAME = 'gcc'
CC_SRC_F = []
CC_TGT_F = ['-c', '-o']
CC_VERSION = ('4', '7', '2')
CFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-ffunction-sections', '-fdata-sections', '-g', '-fPIE', '-Os', '-D_TIME_H_', '-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter', '-Wno-error=unused-function', '-Wno-error=unused-variable', '-std=c11', '-fms-extensions', '-Wno-address', '-Wno-type-limits', '-Wno-missing-field-initializers']
CFLAGS_MACBUNDLE = ['-fPIC']
CFLAGS_cshlib = ['-fPIC']
CPPPATH_ST = '-I%s'
DEFINES = ['RELEASE', 'PBL_PLATFORM_APLITE', 'PBL_BW', 'PBL_RECT', 'PBL_COMPASS', 'PBL_DISPLAY_WIDTH=144', 'PBL_DISPLAY_HEIGHT=168', 'PBL_SDK_3']
DEFINES_ST = '-D%s'
DEST_BINFMT = 'elf'
DEST_CPU = 'arm'
DEST_OS = 'darwin'
INCLUDES = ['aplite']
LD = 'arm-none-eabi-ld'
LIBDIR = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_DIR = 'node_modules'
LIB_ST = '-l%s'
LINKFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-Wl,--gc-sections', '-Wl,--warn-common', '-fPIE', '-Os']
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINKFLAGS_cshlib = ['-shared']
LINKFLAGS_cstlib = ['-Wl,-Bstatic']
LINK_CC = ['arm-none-eabi-gcc']
MESSAGE_KEYS = {}
MESSAGE_KEYS_HEADER = '/Users/gwena/dev/pebble/ActivityLogV2/build/include/message_keys.auto.h'
NODE_PATH = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/node_modules'
PEBBLE_SDK_COMMON = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/common'
PEBBLE_SDK_PLATFORM = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/aplite'
PEBBLE_SDK_ROOT = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble'
PLATFORM = {'TAGS': ['aplite', 'bw', 'rect', 'compass', '144w', '168h'], 'MAX_FONT_GLYPH_SIZE': 256, 'ADDITIONAL_TEXT_LINES_FOR_PEBBLE_H': [], 'MAX_APP_BINARY_SIZE': 65536, 'MAX_RESOURCES_SIZE': 524288, 'MAX_APP_MEMORY_SIZE': 24576, 'MAX_WORKER_MEMORY_SIZE': 10240, 'NAME': 'aplite', 'BUNDLE_BIN_DIR': 'aplite', 'BUILD_DIR': 'aplite', 'MAX_RESOURCES_SIZE_APPSTORE_2_X': 98304, 'MAX_RESOURCES_SIZE_APPSTORE': 131072, 'DEFINES': ['PBL_PLATFORM_APLITE', 'PBL_BW', 'PBL_RECT', 'PBL_COMPASS', 'PBL_DISPLAY_WIDTH=144', 'PBL_DISPLAY_HEIGHT=168']}
PLATFORM_NAME = 'aplite'
PREFIX = '/usr/local'
PROJECT_INFO = {u'sdkVersion': u'3', u'uuid': u'133215f0-cf20-4c05-997b-3c9be5a64e5b', u'appKeys': {}, u'companyName': u'Gwen@56', 'messageKeys': {}, u'targetPlatforms': [u'aplite', u'basalt', u'chalk', u'diorite'], u'capabilities': [u'configurable'], u'versionLabel': u'0.4', u'longName': u'ActivityLog', u'versionCode': 1, u'shortName': u'ActivityLog', u'watchapp': {u'watchface': False}, u'resources': {u'media': [{u'menuIcon': True, u'type': u'bitmap', u'name': u'IMAGE_MENU_ICON', u'file': u'images/menu_icon.png'}, {u'type': u'bitmap', u'name': u'IMAGE_LOGO_SPLASH', u'file': u'images/logo_splash.png'}, {u'type': u'bitmap', u'name': u'IMAGE_TILE_SPLASH', u'file': u'images/tile_splash.png'}, {u'type': u'font', u'name': u'MONO_FONT_14', u'file': u'fonts/UbuntuMono-Regular.ttf'}]}}
REQUESTED_PLATFORMS = [u'aplite', u'basalt', u'chalk', u'diorite']
RESOURCES_JSON = [{u'menuIcon': True, u'type': u'bitmap', u'name': u'IMAGE_MENU_ICON', u'file': u'images/menu_icon.png'}, {u'type': u'bitmap', u'name': u'IMAGE_LOGO_SPLASH', u'file': u'images/logo_splash.png'}, {u'type': u'bitmap', u'name': u'IMAGE_TILE_SPLASH', u'file': u'images/tile_splash.png'}, {u'type': u'font', u'name': u'MONO_FONT_14', u'file': u'fonts/UbuntuMono-Regular.ttf'}]
RPATH_ST = '-Wl,-rpath,%s'
SANDBOX = False
SDK_VERSION_MAJOR = 5
SDK_VERSION_MINOR = 78
SHLIB_MARKER = None
SIZE = 'arm-none-eabi-size'
SONAME_ST = '-Wl,-h,%s'
STLIBPATH_ST = '-L%s'
STLIB_MARKER = None
STLIB_ST = '-l%s'
SUPPORTED_PLATFORMS = ['aplite', 'basalt', 'chalk', 'diorite', 'emery']
TARGET_PLATFORMS = ['diorite', 'chalk', 'basalt', 'aplite']
TIMESTAMP = 1485342651
USE_GROUPS = True
VERBOSE = 0
WEBPACK = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/node_modules/.bin/webpack'
cprogram_PATTERN = '%s'
cshlib_PATTERN = 'lib%s.so'
cstlib_PATTERN = 'lib%s.a'
macbundle_PATTERN = '%s.bundle'
| {
"repo_name": "gwena56/ActivLog",
"path": "build/c4che/aplite_cache.py",
"copies": "1",
"size": "4374",
"license": "mit",
"hash": -4849470611006938000,
"line_mean": 63.3235294118,
"line_max": 788,
"alpha_frac": 0.6700960219,
"autogenerated": false,
"ratio": 2.4408482142857144,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.36109442361857147,
"avg_score": null,
"num_lines": null
} |
# Auto-generated waf configuration cache for the Pebble SDK 'basalt'
# platform build.
# NOTE(review): machine-written by the SDK's configure step -- do not
# edit by hand; values are read back verbatim by the waf build system.
AR = 'arm-none-eabi-ar'
ARFLAGS = 'rcs'
AS = 'arm-none-eabi-gcc'
BINDIR = '/usr/local/bin'
BLOCK_MESSAGE_KEYS = []
BUILD_DIR = 'basalt'
BUILD_TYPE = 'app'
BUNDLE_BIN_DIR = 'basalt'
BUNDLE_NAME = 'ActivityLogV2.pbw'
CC = ['arm-none-eabi-gcc']
CCLNK_SRC_F = []
CCLNK_TGT_F = ['-o']
CC_NAME = 'gcc'
CC_SRC_F = []
CC_TGT_F = ['-c', '-o']
CC_VERSION = ('4', '7', '2')
CFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-ffunction-sections', '-fdata-sections', '-g', '-fPIE', '-Os', '-D_TIME_H_', '-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter', '-Wno-error=unused-function', '-Wno-error=unused-variable', '-std=c11', '-fms-extensions', '-Wno-address', '-Wno-type-limits', '-Wno-missing-field-initializers']
CFLAGS_MACBUNDLE = ['-fPIC']
CFLAGS_cshlib = ['-fPIC']
CPPPATH_ST = '-I%s'
DEFINES = ['RELEASE', 'PBL_PLATFORM_BASALT', 'PBL_COLOR', 'PBL_RECT', 'PBL_MICROPHONE', 'PBL_SMARTSTRAP', 'PBL_HEALTH', 'PBL_COMPASS', 'PBL_SMARTSTRAP_POWER', 'PBL_DISPLAY_WIDTH=144', 'PBL_DISPLAY_HEIGHT=168', 'PBL_SDK_3']
DEFINES_ST = '-D%s'
DEST_BINFMT = 'elf'
DEST_CPU = 'arm'
DEST_OS = 'darwin'
INCLUDES = ['basalt']
LD = 'arm-none-eabi-ld'
LIBDIR = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_DIR = 'node_modules'
LIB_ST = '-l%s'
LINKFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-Wl,--gc-sections', '-Wl,--warn-common', '-fPIE', '-Os']
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINKFLAGS_cshlib = ['-shared']
LINKFLAGS_cstlib = ['-Wl,-Bstatic']
LINK_CC = ['arm-none-eabi-gcc']
MESSAGE_KEYS = {}
MESSAGE_KEYS_HEADER = '/Users/gwena/dev/pebble/ActivityLogV2/build/include/message_keys.auto.h'
NODE_PATH = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/node_modules'
PEBBLE_SDK_COMMON = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/common'
PEBBLE_SDK_PLATFORM = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/basalt'
PEBBLE_SDK_ROOT = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble'
PLATFORM = {'TAGS': ['basalt', 'color', 'rect', 'mic', 'strap', 'strappower', 'compass', 'health', '144w', '168h'], 'MAX_FONT_GLYPH_SIZE': 256, 'ADDITIONAL_TEXT_LINES_FOR_PEBBLE_H': [], 'MAX_APP_BINARY_SIZE': 65536, 'MAX_RESOURCES_SIZE': 1048576, 'MAX_APP_MEMORY_SIZE': 65536, 'MAX_WORKER_MEMORY_SIZE': 10240, 'NAME': 'basalt', 'BUNDLE_BIN_DIR': 'basalt', 'BUILD_DIR': 'basalt', 'MAX_RESOURCES_SIZE_APPSTORE': 262144, 'DEFINES': ['PBL_PLATFORM_BASALT', 'PBL_COLOR', 'PBL_RECT', 'PBL_MICROPHONE', 'PBL_SMARTSTRAP', 'PBL_HEALTH', 'PBL_COMPASS', 'PBL_SMARTSTRAP_POWER', 'PBL_DISPLAY_WIDTH=144', 'PBL_DISPLAY_HEIGHT=168']}
PLATFORM_NAME = 'basalt'
PREFIX = '/usr/local'
PROJECT_INFO = {u'sdkVersion': u'3', u'uuid': u'133215f0-cf20-4c05-997b-3c9be5a64e5b', u'appKeys': {}, u'companyName': u'Gwen@56', 'messageKeys': {}, u'targetPlatforms': [u'aplite', u'basalt', u'chalk', u'diorite'], u'capabilities': [u'configurable'], u'versionLabel': u'0.4', u'longName': u'ActivityLog', u'versionCode': 1, u'shortName': u'ActivityLog', u'watchapp': {u'watchface': False}, u'resources': {u'media': [{u'menuIcon': True, u'type': u'bitmap', u'name': u'IMAGE_MENU_ICON', u'file': u'images/menu_icon.png'}, {u'type': u'bitmap', u'name': u'IMAGE_LOGO_SPLASH', u'file': u'images/logo_splash.png'}, {u'type': u'bitmap', u'name': u'IMAGE_TILE_SPLASH', u'file': u'images/tile_splash.png'}, {u'type': u'font', u'name': u'MONO_FONT_14', u'file': u'fonts/UbuntuMono-Regular.ttf'}]}}
REQUESTED_PLATFORMS = [u'aplite', u'basalt', u'chalk', u'diorite']
RESOURCES_JSON = [{u'menuIcon': True, u'type': u'bitmap', u'name': u'IMAGE_MENU_ICON', u'file': u'images/menu_icon.png'}, {u'type': u'bitmap', u'name': u'IMAGE_LOGO_SPLASH', u'file': u'images/logo_splash.png'}, {u'type': u'bitmap', u'name': u'IMAGE_TILE_SPLASH', u'file': u'images/tile_splash.png'}, {u'type': u'font', u'name': u'MONO_FONT_14', u'file': u'fonts/UbuntuMono-Regular.ttf'}]
RPATH_ST = '-Wl,-rpath,%s'
SANDBOX = False
SDK_VERSION_MAJOR = 5
SDK_VERSION_MINOR = 86
SHLIB_MARKER = None
SIZE = 'arm-none-eabi-size'
SONAME_ST = '-Wl,-h,%s'
STLIBPATH_ST = '-L%s'
STLIB_MARKER = None
STLIB_ST = '-l%s'
SUPPORTED_PLATFORMS = ['aplite', 'basalt', 'chalk', 'diorite', 'emery']
TARGET_PLATFORMS = ['diorite', 'chalk', 'basalt', 'aplite']
TIMESTAMP = 1485342651
USE_GROUPS = True
VERBOSE = 0
WEBPACK = '/Users/gwena/Library/Application Support/Pebble SDK/SDKs/current/node_modules/.bin/webpack'
cprogram_PATTERN = '%s'
cshlib_PATTERN = 'lib%s.so'
cstlib_PATTERN = 'lib%s.a'
macbundle_PATTERN = '%s.bundle'
| {
"repo_name": "gwena56/ActivLog",
"path": "build/c4che/basalt_cache.py",
"copies": "1",
"size": "4530",
"license": "mit",
"hash": 7991887308894448000,
"line_mean": 65.6176470588,
"line_max": 788,
"alpha_frac": 0.6710816777,
"autogenerated": false,
"ratio": 2.44336569579288,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8608437286620054,
"avg_score": 0.0012020173745651477,
"num_lines": 68
} |
# Auto-generated waf configuration cache for the Pebble SDK 'basalt'
# platform build (SDK 3.4 era).
# NOTE(review): machine-written by the SDK's configure step -- do not
# edit by hand; values are read back verbatim by the waf build system.
AR = 'arm-none-eabi-ar'
ARFLAGS = 'rcs'
AS = 'arm-none-eabi-gcc'
BINDIR = '/usr/local/bin'
BUILD_DIR = 'basalt'
CC = ['arm-none-eabi-gcc']
CCLNK_SRC_F = []
CCLNK_TGT_F = ['-o']
CC_NAME = 'gcc'
CC_SRC_F = []
CC_TGT_F = ['-c', '-o']
CC_VERSION = ('4', '7', '2')
CFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-ffunction-sections', '-fdata-sections', '-g', '-Os', '-D_TIME_H_', '-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter', '-Wno-error=unused-function', '-Wno-error=unused-variable', '-std=c11', '-fms-extensions', '-Wno-address', '-Wno-type-limits', '-Wno-missing-field-initializers']
CFLAGS_MACBUNDLE = ['-fPIC']
CFLAGS_cshlib = ['-fPIC']
CPPPATH_ST = '-I%s'
DEFINES = ['RELEASE', 'PBL_PLATFORM_BASALT', 'PBL_COLOR', 'PBL_SDK_3']
DEFINES_ST = '-D%s'
DEST_BINFMT = 'elf'
DEST_CPU = 'arm'
DEST_OS = 'darwin'
INCLUDES = ['basalt']
LD = 'arm-none-eabi-ld'
LIBDIR = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-Wl,--gc-sections', '-Wl,--warn-common', '-Os']
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINKFLAGS_cshlib = ['-shared']
LINKFLAGS_cstlib = ['-Wl,-Bstatic']
LINK_CC = ['arm-none-eabi-gcc']
PBW_BIN_DIR = 'basalt'
PEBBLE_SDK = '/usr/local/Cellar/pebble-sdk/3.4/Pebble/basalt'
PEBBLE_SDK_COMMON = '/usr/local/Cellar/pebble-sdk/3.4/Pebble/common'
PLATFORM = {'PBW_BIN_DIR': 'basalt', 'TAGS': ['basalt', 'color'], 'ADDITIONAL_TEXT_LINES_FOR_PEBBLE_H': [], 'MAX_APP_BINARY_SIZE': 65536, 'MAX_RESOURCES_SIZE': 1048576, 'MAX_APP_MEMORY_SIZE': 65536, 'MAX_WORKER_MEMORY_SIZE': 10240, 'NAME': 'basalt', 'BUILD_DIR': 'basalt', 'MAX_RESOURCES_SIZE_APPSTORE': 262144, 'DEFINES': ['PBL_PLATFORM_BASALT', 'PBL_COLOR']}
PLATFORM_NAME = 'basalt'
PREFIX = '/usr/local'
RPATH_ST = '-Wl,-rpath,%s'
SDK_VERSION_MAJOR = 5
SDK_VERSION_MINOR = 60
SHLIB_MARKER = None
SIZE = 'arm-none-eabi-size'
SONAME_ST = '-Wl,-h,%s'
STLIBPATH_ST = '-L%s'
STLIB_MARKER = None
STLIB_ST = '-l%s'
TARGET_PLATFORMS = [u'basalt', u'aplite']
cprogram_PATTERN = '%s'
cshlib_PATTERN = 'lib%s.so'
cstlib_PATTERN = 'lib%s.a'
macbundle_PATTERN = '%s.bundle'
| {
"repo_name": "chriswongtv/GrandWatch",
"path": "watch/build/c4che/basalt_cache.py",
"copies": "1",
"size": "2093",
"license": "mit",
"hash": -5787560339911913000,
"line_mean": 40.0392156863,
"line_max": 360,
"alpha_frac": 0.640707119,
"autogenerated": false,
"ratio": 2.3838268792710706,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3524533998271071,
"avg_score": null,
"num_lines": null
} |
# Auto-generated waf configuration cache for a Pebble SDK 2.0.2 build.
# NOTE(review): machine-written by the SDK's configure step -- do not
# edit by hand; values are read back verbatim by the waf build system.
AR = 'arm-none-eabi-ar'
ARFLAGS = 'rcs'
AS = 'arm-none-eabi-gcc'
BINDIR = '/usr/local/bin'
CC = ['arm-none-eabi-gcc']
CCLNK_SRC_F = []
CCLNK_TGT_F = ['-o']
CC_NAME = 'gcc'
CC_SRC_F = []
CC_TGT_F = ['-c', '-o']
CC_VERSION = ('4', '7', '2')
CFLAGS = ['-std=c99', '-mcpu=cortex-m3', '-mthumb', '-ffunction-sections', '-fdata-sections', '-g', '-Os', '-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter', '-Wno-error=unused-function', '-Wno-error=unused-variable']
CFLAGS_MACBUNDLE = ['-fPIC']
CFLAGS_cshlib = ['-fPIC']
CPPPATH_ST = '-I%s'
DEFINES = ['RELEASE']
DEFINES_ST = '-D%s'
DEST_BINFMT = 'elf'
DEST_CPU = 'arm'
DEST_OS = 'darwin'
LD = 'arm-none-eabi-ld'
LIBDIR = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_ST = '-l%s'
LINKFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-Wl,--gc-sections', '-Wl,--warn-common', '-Os']
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINKFLAGS_cshlib = ['-shared']
LINKFLAGS_cstlib = ['-Wl,-Bstatic']
LINK_CC = ['arm-none-eabi-gcc']
PEBBLE_SDK = '/Users/admin/pebble-dev/PebbleSDK-2.0.2/Pebble'
PREFIX = '/usr/local'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = None
SIZE = 'arm-none-eabi-size'
SONAME_ST = '-Wl,-h,%s'
STLIBPATH_ST = '-L%s'
STLIB_MARKER = None
STLIB_ST = '-l%s'
cprogram_PATTERN = '%s'
cshlib_PATTERN = 'lib%s.so'
cstlib_PATTERN = 'lib%s.a'
macbundle_PATTERN = '%s.bundle'
| {
"repo_name": "danigar/runrunpeter",
"path": "build/c4che/_cache.py",
"copies": "1",
"size": "1332",
"license": "apache-2.0",
"hash": -3683564518854522000,
"line_mean": 30.7142857143,
"line_max": 222,
"alpha_frac": 0.6178678679,
"autogenerated": false,
"ratio": 2.3286713286713288,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8444711354886789,
"avg_score": 0.00036556833690777927,
"num_lines": 42
} |
"""A raster object for efficiently computing point in polygon."""
import numpy
class Raster(object):
    """A class for manipulating raster files (point-in-polygon lookups)."""

    # Georeferencing state; populated by load_data_from_file().
    grid = None      # 2-D numpy array of integer cell values
    nrows = None     # number of grid rows
    ncols = None     # number of grid columns
    cellsize = None  # cell edge length (same units as lon/lat)
    lonmin = None    # longitude of the grid's west edge
    latmin = None    # latitude of the grid's south edge

    def load_data_from_file(self, fname):
        """Load raster data from a file.

        The header is five "<name> <value>" lines (ncols, nrows, lonmin,
        latmin, cellsize) followed by whitespace-separated integer rows.
        TODO: Find the format name.
        """
        # `with` guarantees the handle is closed even on a parse error
        # (the original opened the file and never closed it).
        with open(fname, 'r') as fid:
            self.ncols = int(fid.readline().split()[1])
            self.nrows = int(fid.readline().split()[1])
            self.lonmin = float(fid.readline().split()[1])
            self.latmin = float(fid.readline().split()[1])
            self.cellsize = float(fid.readline().split()[1])
            grid = []
            for line in fid:
                grid.append([int(tok) for tok in line.rstrip().split()])
        self.grid = numpy.array(grid)

    def in_poly(self, lon, lat):
        """Lookup the raster value for the cell containing a lon, lat pair.

        Returns 0 for points that fall outside the grid.
        """
        j = int((lon - self.lonmin) / self.cellsize)
        # NOTE(review): a point exactly on the south edge maps to
        # i == nrows and is treated as outside -- confirm intended.
        i = self.nrows - int((lat - self.latmin) / self.cellsize)
        if (i >= 0) and (i < self.nrows) and (j >= 0) and (j < self.ncols):
            return self.grid[i, j]
        return 0

    def in_poly_vec(self, lon, lat):
        """Lookup the raster value for multiple lon, lat pairs (numpy arrays).

        Out-of-grid points get value 0.
        """
        j = ((lon - self.lonmin) / self.cellsize).astype(int)
        i = self.nrows - ((lat - self.latmin) / self.cellsize).astype(int)
        # Positions whose row AND column indices fall inside the grid.
        idx = numpy.intersect1d(
            numpy.where(((i >= 0) & (i < self.nrows)))[0],
            numpy.where(((j >= 0) & (j < self.ncols)))[0]
        )
        vals = numpy.zeros(len(lon))
        vals[idx] = self.grid[i[idx], j[idx]]
        return vals.astype(int)

    def get_x(self, lon):
        """Return the x (column) index of the raster corresponding to a lon value."""
        return int((lon - self.lonmin) / self.cellsize)

    def get_y(self, lat):
        """Return the y (row) index of the raster corresponding to a lat value."""
        return self.nrows - int((lat - self.latmin) / self.cellsize)

    def unique_values(self):
        """Return a sorted array of all the unique raster values."""
        return numpy.unique(self.grid)
| {
"repo_name": "wherehouse/wherehouse-sdk-py",
"path": "wherehouse/centroid/models/raster.py",
"copies": "1",
"size": "2227",
"license": "mit",
"hash": 8156286964061685000,
"line_mean": 31.2753623188,
"line_max": 78,
"alpha_frac": 0.5590480467,
"autogenerated": false,
"ratio": 3.5575079872204474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4616556033920447,
"avg_score": null,
"num_lines": null
} |
"""A rather messy way of parsing commands."""
### This is better than what I made but I still probably want to redo it
from pag import words as pag_words
class Token:
    """A single parsed word together with its grammatical role."""

    # Token type tags.
    T_VERB = 'Verb'
    T_NOUN = 'Noun'
    T_EXTRA = 'Extra'
    T_DIRECTION = 'Direction'

    def __init__(self, tvalue, ttype=T_VERB):
        """
        tvalue : Token literal value.
        ttype  : Token type (one of the T_* constants).
        """
        self._ttype = ttype
        self._tvalue = tvalue

    def __str__(self):
        return self._tvalue

    def __repr__(self):
        return "{0}<{1}>".format(self._ttype, self._tvalue)

    def __eq__(self, other):
        # BUGFIX: comparing against a non-Token used to raise
        # AttributeError; NotImplemented lets Python fall back to
        # ordinary "not equal" semantics.
        if not isinstance(other, Token):
            return NotImplemented
        return other._ttype == self._ttype and other._tvalue == self._tvalue

    def __hash__(self):
        # Defined alongside __eq__ so equal tokens hash equally; without
        # this, defining __eq__ alone makes instances unhashable.
        return hash((self._ttype, self._tvalue))
class Preprocessor:
    """Normalises a raw command string into a clean word sequence."""

    def __init__(self):
        self._directions = pag_words.directions
        self._extras = pag_words.extras

    def supplement_words(self, words=None):
        """Merge game-specific vocabulary into the built-in word tables.

        :param words: optional dict with 'extras' / 'directions' keys.
        """
        if words is not None:
            if 'extras' in words:
                self._extras = {**self._extras, **words['extras']}
            if 'directions' in words:
                # BUGFIX: this previously merged into self._verbs, an
                # attribute Preprocessor does not have (AttributeError).
                self._directions = {**self._directions, **words['directions']}

    def prep(self, command):
        """Pre-process a command.

        Returns a list of lowercase words with filler words removed; a
        command that is just a direction (or synonym) expands to
        ['go', <direction>].
        """
        normalized = command.lower().strip()
        if not normalized:
            # BUGFIX: used to return "" here; an empty list matches the
            # documented "sequence of words" contract.
            return []
        word_seq = [w for w in normalized.split() if w]
        # See if the command is only a direction (or a synonym of one).
        # BUGFIX: matching now uses the lowercased text, so "North" and
        # "NORTH" behave like "north".
        for name, synonyms in self._directions.items():
            if normalized == name or normalized in synonyms:
                word_seq = ["go", name]
                break
        # Remove extra (filler) words.
        return [w for w in word_seq if w not in self._extras]
class Parser:
    """Parses a pre-processed command into Verb/Noun tokens."""

    def __init__(self):
        # NOTE: a stray dead `pass` statement was removed from here.
        self._words = None
        self._verbs = pag_words.verbs
        self._nouns = pag_words.nouns
        self._extras = pag_words.extras
        self._directions = pag_words.directions

    def supplement_words(self, words=None):
        """Merge game-specific vocabulary into the built-in word tables.

        :param words: optional dict with any of the keys 'verbs',
            'nouns', 'extras', 'directions'.
        """
        self._words = words
        if words is not None:
            if 'verbs' in words:
                self._verbs = {**self._verbs, **words['verbs']}
            if 'nouns' in words:
                self._nouns = {**self._nouns, **words['nouns']}
            if 'extras' in words:
                self._extras = {**self._extras, **words['extras']}
            if 'directions' in words:
                # BUGFIX: this previously merged words['directions'] on
                # top of self._verbs (copy-paste error), dropping the
                # built-in directions and polluting the table with verbs.
                self._directions = {**self._directions, **words['directions']}

    def eat_verb(self, word_seq):
        """
        Try to consume a verb from a word sequence.

        On success:
          - Returns a new token of type T_VERB
          - Consumed word removed from word_seq.
        On failure:
          - Returns None
          - word_seq unchanged.
        """
        if len(word_seq) == 0:
            return None
        word = word_seq[0]
        for i in self._verbs:
            if word.strip() == i:
                word_seq.pop(0)
                return Token(i)
            else:
                for syn in self._verbs[i]:
                    if (word.strip() == syn):
                        word_seq.pop(0)
                        return Token(i)
        return None

    def eat_noun(self, word_seq):
        """
        Try to consume a noun from a word sequence.

        On success:
          - Returns a new token of type T_NOUN
          - Consumed word removed from word_seq.
        On failure:
          - Returns None
          - word_seq unchanged.
        """
        if len(word_seq) == 0:
            return None
        # Attempt a greedy eat: try to eat 'toilet paper roll'
        # even if we would succeed at 'toilet paper'.
        greedy_seq = self.merge_first_words(word_seq)
        if len(greedy_seq) != len(word_seq):
            greedy_res = self.eat_noun(greedy_seq)
            if greedy_res is not None:
                # Drop the words the greedy attempt consumed.
                while len(greedy_seq) < len(word_seq):
                    word_seq.pop(0)
                return greedy_res
        word = word_seq[0]
        # Directions are accepted as nouns too (e.g. "go north").
        for i in {**self._nouns, **self._directions}:
            if word == i:
                word_seq.pop(0)
                return Token(i, Token.T_NOUN)
            else:
                for syn in {**self._nouns, **self._directions}[i]:
                    if word == syn:
                        word_seq.pop(0)
                        return Token(i, Token.T_NOUN)
        return None  # explicit (was an implicit fall-through)

    def merge_first_words(self, word_seq):
        """
        Merge the first two words in a word sequence.

        Needed for multi-word tokens, e.g. 'look at', 'toilet paper'.
        """
        if len(word_seq) > 1:
            return [word_seq[0] + " " + word_seq[1]] + word_seq[2:]
        return word_seq[:]

    def parse(self, command):
        """Parse *command*; returns a token list, or None after printing
        a complaint when the command cannot be understood."""
        prep = Preprocessor()
        prep.supplement_words(self._words)
        word_seq = prep.prep(command)
        parsed_command = []
        # A command must start with a verb.
        verb = self.eat_verb(word_seq)
        if verb is None and len(word_seq) > 1:
            # Try again with multi-word commands, e.g. 'pick up'.
            word_seq = self.merge_first_words(word_seq)
            verb = self.eat_verb(word_seq)
        if verb is not None:
            parsed_command.append(verb)
        else:
            print('What?')
            return
        # Next is a noun. Maybe.
        if len(word_seq) > 0:
            noun_result = self.eat_noun(word_seq)
            if noun_result is not None:
                parsed_command.append(noun_result)
            else:
                rest_of_command = " ".join(word_seq)
                print(f'I don\'t understand the noun "{rest_of_command}".')
                return
        if len(word_seq) > 0:
            rest_of_command = " ".join(word_seq)
            print(f'I don\'t understand the extra word "{rest_of_command}".')
            return
        return parsed_command
def parse_command(command, words=None):
    """Parse *command* and return its word values, or None on failure."""
    parser = Parser()
    parser.supplement_words(words)
    tokens = parser.parse(command)
    if tokens is None:
        return None
    return [token._tvalue for token in tokens]
| {
"repo_name": "disorientedperson/python-adventure-game",
"path": "pag/parser.py",
"copies": "2",
"size": "6490",
"license": "mit",
"hash": -8453668431109277000,
"line_mean": 25.4897959184,
"line_max": 77,
"alpha_frac": 0.5018489985,
"autogenerated": false,
"ratio": 3.8746268656716416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5376475864171641,
"avg_score": null,
"num_lines": null
} |
# A Ravello SDK example for enforcing a short auto-stop for all published applications
# in the account
#
# To use, edit the relevant variables (or extract them to command line param),
# and run enforce_autostop()
#
# Copyright 2011-2015 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from ravello_sdk import *
import datetime
# Longest allowed time-to-stop for any deployed application.
MAX_ALLOWED_EXPIRATION_PERIOD_IN_SEC = 60*60*2 # 2 hours
# Ravello account credentials -- replace these placeholders before running.
USERNAME = 'wile.e.coyote@acme.com'
PASSWORD = 'PA$$W0RD'
def enforce_autostop():
    """Cap the auto-stop timer of every deployed application.

    Logs in, walks all applications, and (re)sets the expiration of any
    published app with running VMs whose stop time is missing or further
    in the future than MAX_ALLOWED_EXPIRATION_PERIOD_IN_SEC allows.
    """
    client = RavelloClient()
    client.login(USERNAME, PASSWORD)
    apps = client.get_applications()
    for app in apps:
        if not app['published']:
            continue
        deployment = app['deployment']
        if deployment['totalActiveVms'] == 0:
            continue
        # BUGFIX: dict.has_key() is deprecated in Python 2 and removed in
        # Python 3; the `in` operator works on both.
        if 'nextStopTime' not in app:
            # No expiration set for this application: set it.
            set_expiration(client, app)
        else:
            # nextStopTime is epoch milliseconds (hence the / 1e3), UTC.
            expiration_time = datetime.datetime.utcfromtimestamp(app['nextStopTime'] / 1e3)
            # If the expiration is too far into the future, tighten it.
            if should_expire_app(expiration_time, app):
                set_expiration(client, app)
def should_expire_app(current_expiration_time_utc, app):
    """Return True if the app's expiration exceeds the allowed window.

    :param current_expiration_time_utc: naive UTC datetime of the app's
        currently scheduled stop time.
    :param app: the application dict (unused; kept for interface
        compatibility with callers).
    """
    delta = current_expiration_time_utc - datetime.datetime.utcnow()
    # Idiom: return the comparison directly instead of if/return True/False.
    return delta.total_seconds() > MAX_ALLOWED_EXPIRATION_PERIOD_IN_SEC
def set_expiration(client, app):
    # Schedules *app* to stop MAX_ALLOWED_EXPIRATION_PERIOD_IN_SEC from now.
    # NOTE(review): Python 2 print statement -- this example predates
    # Python 3 support; porting would require print() calls file-wide.
    print "setting expiration for ", app['name']
    client.set_application_expiration(app, {'expirationFromNowSeconds': MAX_ALLOWED_EXPIRATION_PERIOD_IN_SEC})
"repo_name": "ryran/python-sdk",
"path": "examples/enforce_autostop.py",
"copies": "4",
"size": "2174",
"license": "apache-2.0",
"hash": -7904376359024440000,
"line_mean": 37.1578947368,
"line_max": 110,
"alpha_frac": 0.6931922723,
"autogenerated": false,
"ratio": 3.834215167548501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030736230808785226,
"num_lines": 57
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.