text stringlengths 0 1.05M | meta dict |
|---|---|
"""API for creating a contract and configuring the mock service."""
from __future__ import unicode_literals
import os
import platform
from subprocess import Popen
import psutil
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import Retry
from .broker import Broker
from .constants import MOCK_SERVICE_PATH
from .matchers import from_term
class Pact(Broker):
    """
    Represents a contract between a consumer and provider.

    Provides Python context handlers to configure the Pact mock service to
    perform tests on a Python consumer. For example:

    >>> from pact import Consumer, Provider
    >>> pact = Consumer('consumer').has_pact_with(Provider('provider'))
    >>> (pact.given('the echo service is available')
    ...  .upon_receiving('a request is made to the echo service')
    ...  .with_request('get', '/echo', query={'text': 'Hello!'})
    ...  .will_respond_with(200, body='Hello!'))
    >>> with pact:
    ...   requests.get(pact.uri + '/echo?text=Hello!')

    The GET request is made to the mock service, which will verify that it
    was a GET to /echo with a query string with a key named `text` and its
    value is `Hello!`. If the request does not match an error is raised, if it
    does match the defined interaction, it will respond with the text `Hello!`.
    """

    # Header identifying administrative calls to the mock service itself.
    HEADERS = {'X-Pact-Mock-Service': 'true'}

    # An interaction is considered complete once it has all of these fields.
    MANDATORY_FIELDS = {'response', 'description', 'request'}

    def __init__(
        self,
        consumer,
        provider,
        host_name='localhost',
        port=1234,
        log_dir=None,
        ssl=False,
        sslcert=None,
        sslkey=None,
        cors=False,
        publish_to_broker=False,
        broker_base_url=None,
        broker_username=None,
        broker_password=None,
        broker_token=None,
        pact_dir=None,
        specification_version='2.0.0',
        file_write_mode='overwrite',
    ):
        """
        Create a Pact instance.

        :param consumer: The consumer for this contract.
        :type consumer: pact.Consumer
        :param provider: The provider for this contract.
        :type provider: pact.Provider
        :param host_name: The host name where the mock service is running.
        :type host_name: str
        :param port: The port number where the mock service is running.
        :type port: int
        :param log_dir: The directory where logs should be written. Defaults to
            the current directory.
        :type log_dir: str
        :param ssl: Flag to control the use of a self-signed SSL cert to run
            the server over HTTPS , defaults to False.
        :type ssl: bool
        :param sslcert: Path to a custom self-signed SSL cert file, 'ssl'
            option must be set to True to use this option. Defaults to None.
        :type sslcert: str
        :param sslkey: Path to a custom key and self-signed SSL cert key file,
            'ssl' option must be set to True to use this option.
            Defaults to None.
        :type sslkey: str
        :param cors: Allow CORS OPTION requests to be accepted,
            defaults to False.
        :type cors: bool
        :param publish_to_broker: Flag to control automatic publishing of
            pacts to a pact broker. Defaults to False.
        :type publish_to_broker: bool
        :param broker_base_url: URL of the pact broker that pacts will be
            published to. Can also be supplied through the PACT_BROKER_BASE_URL
            environment variable. Defaults to None.
        :type broker_base_url: str
        :param broker_username: Username to use when connecting to the pact
            broker if authentication is required. Can also be supplied through
            the PACT_BROKER_USERNAME environment variable. Defaults to None.
        :type broker_username: str
        :param broker_password: Password to use when connecting to the pact
            broker if authentication is required. Strongly recommend supplying
            this value through the PACT_BROKER_PASSWORD environment variable
            instead. Defaults to None.
        :type broker_password: str
        :param broker_token: Authentication token to use when connecting to
            the pact broker. Strongly recommend supplying this value through
            the PACT_BROKER_TOKEN environment variable instead.
            Defaults to None.
        :type broker_token: str
        :param pact_dir: Directory where the resulting pact files will be
            written. Defaults to the current directory.
        :type pact_dir: str
        :param specification_version: The Pact Specification version to use,
            defaults to '2.0.0'.
        :type specification_version: str
        :param file_write_mode: `overwrite` or `merge`. Use `merge` when
            running multiple mock service instances in parallel for the same
            consumer/provider pair. Ensure the pact file is deleted before
            running tests when using this option so that interactions deleted
            from the code are not maintained in the file. Defaults to
            `overwrite`.
        :type file_write_mode: str
        """
        super().__init__(
            broker_base_url, broker_username, broker_password, broker_token
        )
        scheme = 'https' if ssl else 'http'
        self.uri = '{scheme}://{host_name}:{port}'.format(
            host_name=host_name, port=port, scheme=scheme)
        self.consumer = consumer
        self.cors = cors
        self.file_write_mode = file_write_mode
        self.host_name = host_name
        self.log_dir = log_dir or os.getcwd()
        self.pact_dir = pact_dir or os.getcwd()
        self.port = port
        self.provider = provider
        self.publish_to_broker = publish_to_broker
        self.ssl = ssl
        self.sslcert = sslcert
        self.sslkey = sslkey
        self.specification_version = specification_version
        # Interactions are collected newest-first; index 0 is always the
        # interaction currently being built by the fluent API.
        self._interactions = []
        self._process = None

    def given(self, provider_state):
        """
        Define the provider state for this pact.

        When the provider verifies this contract, they will use this field to
        setup pre-defined data that will satisfy the response expectations.

        :param provider_state: The short sentence that is unique to describe
            the provider state for this contract.
        :type provider_state: basestring
        :rtype: Pact
        """
        self._insert_interaction_if_complete()
        self._interactions[0]['provider_state'] = provider_state
        return self

    def setup(self):
        """
        Configure the Mock Service to ready it for a test.

        Clears any interactions left over from a previous test and registers
        the interactions defined so far with the running mock service.

        :raises AssertionError: if the mock service rejects either request.
        """
        # Previously this was wrapped in a no-op
        # ``try/except AssertionError: raise`` which added nothing; the
        # assertions below propagate on their own.
        interactions_uri = f"{self.uri}/interactions"
        resp = requests.delete(
            interactions_uri, headers=self.HEADERS, verify=False
        )
        assert resp.status_code == 200, resp.text
        resp = requests.put(
            interactions_uri,
            headers=self.HEADERS,
            verify=False,
            json={"interactions": self._interactions},
        )
        assert resp.status_code == 200, resp.text

    def start_service(self):
        """
        Start the external Mock Service.

        :raises RuntimeError: if there is a problem starting the mock service.
        """
        command = [
            MOCK_SERVICE_PATH,
            "service",
            f"--host={self.host_name}",
            f"--port={self.port}",
            "--log", f"{self.log_dir}/pact-mock-service.log",
            "--pact-dir", self.pact_dir,
            "--pact-file-write-mode", self.file_write_mode,
            f"--pact-specification-version={self.specification_version}",
            "--consumer", self.consumer.name,
            "--provider", self.provider.name,
        ]
        if self.ssl:
            command.append('--ssl')
        if self.sslcert:
            command.extend(['--sslcert', self.sslcert])
        if self.sslkey:
            command.extend(['--sslkey', self.sslkey])
        self._process = Popen(command)
        # Block until the service answers HTTP requests (or fail loudly).
        self._wait_for_server_start()

    def stop_service(self):
        """
        Stop the external Mock Service.

        :raises RuntimeError: if the mock service cannot be stopped cleanly.
        """
        is_windows = 'windows' in platform.platform().lower()
        if is_windows:
            # On Windows the Popen handle points at a *.bat wrapper; the
            # signal must go to the spawned ruby.exe children instead.
            p = psutil.Process(self._process.pid)
            for child in p.children(recursive=True):
                child.terminate()
            p.wait()
            if psutil.pid_exists(self._process.pid):
                raise RuntimeError(
                    'There was an error when stopping the Pact mock service.')
        else:
            self._process.terminate()
            self._process.communicate()
            if self._process.returncode != 0:
                raise RuntimeError(
                    'There was an error when stopping the Pact mock service.'
                )
        if self.publish_to_broker:
            self.publish(
                self.consumer.name,
                self.consumer.version,
                tag_with_git_branch=self.consumer.tag_with_git_branch,
                consumer_tags=self.consumer.tags,
                pact_dir=self.pact_dir
            )

    def upon_receiving(self, scenario):
        """
        Define the name of this contract.

        :param scenario: A unique name for this contract.
        :type scenario: basestring
        :rtype: Pact
        """
        self._insert_interaction_if_complete()
        self._interactions[0]['description'] = scenario
        return self

    def verify(self):
        """
        Have the mock service verify all interactions occurred.

        Calls the mock service to verify that all interactions occurred as
        expected, and has it write out the contracts to disk.

        :raises AssertionError: When not all interactions are found.
        """
        self._interactions = []
        resp = requests.get(
            self.uri + "/interactions/verification",
            headers=self.HEADERS, verify=False
        )
        assert resp.status_code == 200, resp.text
        resp = requests.post(
            self.uri + "/pact", headers=self.HEADERS, verify=False
        )
        assert resp.status_code == 200, resp.text

    def with_request(self, method, path, body=None, headers=None, query=None):
        """
        Define the request that the client is expected to perform.

        :param method: The HTTP method.
        :type method: str
        :param path: The path portion of the URI the client will access.
        :type path: str, Matcher
        :param body: The request body, can be a string or an object that will
            serialize to JSON, like list or dict, defaults to None.
        :type body: list, dict or None
        :param headers: The headers the client is expected to include on with
            this request. Defaults to None.
        :type headers: dict or None
        :param query: The query options the client is expected to send. Can be
            a dict of keys and values, or a URL encoded string.
            Defaults to None.
        :type query: dict, basestring, or None
        :rtype: Pact
        """
        self._insert_interaction_if_complete()
        self._interactions[0]['request'] = Request(
            method, path, body=body, headers=headers, query=query
        ).json()
        return self

    def will_respond_with(self, status, headers=None, body=None):
        """
        Define the response the server is expected to create.

        :param status: The HTTP status code.
        :type status: int
        :param headers: All required headers. Defaults to None.
        :type headers: dict or None
        :param body: The response body, or a collection of Matcher objects to
            allow for pattern matching. Defaults to None.
        :type body: Matcher, dict, list, basestring, or None
        :rtype: Pact
        """
        self._insert_interaction_if_complete()
        self._interactions[0]['response'] = Response(
            status, headers=headers, body=body
        ).json()
        return self

    def _insert_interaction_if_complete(self):
        """
        Insert a new interaction if current interaction is complete.

        An interaction is complete if it has all the mandatory fields.
        If there are no interactions, a new interaction will be added.

        :rtype: None
        """
        if not self._interactions:
            self._interactions.append({})
        elif all(field in self._interactions[0]
                 for field in self.MANDATORY_FIELDS):
            # Current interaction is finished; start a fresh one at index 0.
            self._interactions.insert(0, {})

    def _wait_for_server_start(self):
        """
        Wait for the mock service to be ready for requests.

        :rtype: None
        :raises RuntimeError: If there is a problem starting the mock service.
        """
        s = requests.Session()
        retries = Retry(total=9, backoff_factor=0.1)
        http_mount = 'https://' if self.ssl else 'http://'
        s.mount(http_mount, HTTPAdapter(max_retries=retries))
        resp = s.get(self.uri, headers=self.HEADERS, verify=False)
        if resp.status_code != 200:
            self._process.terminate()
            self._process.communicate()
            # Bug fix: the message was previously passed logging-style as
            # ``RuntimeError('... %s', resp.text)`` and never interpolated.
            raise RuntimeError(
                'There was a problem starting the mock service: {}'.format(
                    resp.text)
            )

    def __enter__(self):
        """
        Enter a Python context.

        Sets up the mock service to expect the client requests.

        :rtype: Pact
        """
        self.setup()
        # Return self so ``with pact as p:`` works; plain ``with pact:``
        # callers are unaffected.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Exit a Python context.

        Calls the mock service to verify that all interactions occurred as
        expected, and has it write out the contracts to disk. Skips
        verification if the block raised, so the original error surfaces.
        """
        if (exc_type, exc_val, exc_tb) != (None, None, None):
            return
        self.verify()
class FromTerms(object):
    """Base class for objects built from a collection of Matchers."""

    def json(self):
        """Convert the object to a JSON version of the mock service.

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class Request(FromTerms):
    """Represents an HTTP request and supports Matchers on its properties."""

    def __init__(self, method, path, body=None, headers=None, query=''):
        """
        Create a new instance of Request.

        :param method: The HTTP method that is expected.
        :type method: str
        :param path: The URI path that is expected on this request.
        :type path: str, Matcher
        :param body: The contents of the body of the expected request.
        :type body: str, dict, list
        :param headers: The headers of the expected request.
        :type headers: dict
        :param query: The URI query of the expected request.
        :type query: str or dict
        """
        self.method = method
        self.path = from_term(path)
        self.body = from_term(body)
        self.headers = from_term(headers)
        self.query = from_term(query)

    def json(self):
        """Serialize this expected request for the mock service."""
        serialized = {'method': self.method, 'path': self.path}
        if self.headers:
            serialized['headers'] = self.headers
        if self.body is not None:
            serialized['body'] = self.body
        if self.query:
            serialized['query'] = self.query
        return serialized
class Response(FromTerms):
    """Represents an HTTP response and supports Matchers on its properties."""

    def __init__(self, status, headers=None, body=None):
        """
        Create a new Response.

        :param status: The expected HTTP status of the response.
        :type status: int
        :param headers: The expected headers of the response.
        :type headers: dict
        :param body: The expected body of the response.
        :type body: str, dict, or list
        """
        self.status = status
        self.body = from_term(body)
        self.headers = from_term(headers)

    def json(self):
        """Serialize this expected response for the mock service."""
        serialized = {'status': self.status}
        if self.body is not None:
            serialized['body'] = self.body
        if self.headers:
            serialized['headers'] = self.headers
        return serialized
| {
"repo_name": "pact-foundation/pact-python",
"path": "pact/pact.py",
"copies": "1",
"size": "16407",
"license": "mit",
"hash": -1332257370136846000,
"line_mean": 35.6227678571,
"line_max": 88,
"alpha_frac": 0.6024867435,
"autogenerated": false,
"ratio": 4.404563758389262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010296652496545094,
"num_lines": 448
} |
"""API for email verifications"""
from urllib.parse import quote_plus
from django.urls import reverse
from mail import api
from authentication.models import BlockedEmailRegex
VERIFICATION_TEMPLATE_NAME = "verification"
def send_verification_email(
    strategy, backend, code, partial_token
):  # pylint: disable=unused-argument
    """
    Sends a verification email for python-social-auth

    Args:
        strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate
        backend (social_core.backends.base.BaseAuth): the backend being used to authenticate
        code (social_django.models.Code): the confirmation code used to confirm the email address
        partial_token (str): token used to resume a halted pipeline
    """
    confirmation_url = "{}?verification_code={}&partial_token={}".format(
        strategy.build_absolute_uri(reverse("register-confirm")),
        quote_plus(code.code),
        quote_plus(partial_token),
    )
    # Suppress the email entirely when the address matches any blocked
    # pattern (postgres regex match via the ``match`` column).
    blocked = BlockedEmailRegex.objects.extra(
        where=["%s ~ match"], params=[code.email]
    ).exists()
    if blocked:
        return
    recipients = [
        (
            code.email,
            api.context_for_user(extra_context={"confirmation_url": confirmation_url}),
        )
    ]
    messages = api.messages_for_recipients(recipients, VERIFICATION_TEMPLATE_NAME)
    api.send_messages(list(messages))
| {
"repo_name": "mitodl/open-discussions",
"path": "mail/verification_api.py",
"copies": "1",
"size": "1546",
"license": "bsd-3-clause",
"hash": 3537016259906707500,
"line_mean": 31.8936170213,
"line_max": 97,
"alpha_frac": 0.5692108668,
"autogenerated": false,
"ratio": 4.742331288343558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006771560642997315,
"num_lines": 47
} |
"""API for email verifications"""
from urllib.parse import quote_plus
from django.urls import reverse
from mail.v2 import api
from mail.v2.constants import EMAIL_VERIFICATION, EMAIL_CHANGE_EMAIL
def send_verification_email(
    strategy, backend, code, partial_token
):  # pylint: disable=unused-argument
    """
    Sends a verification email for python-social-auth

    Args:
        strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate
        backend (social_core.backends.base.BaseAuth): the backend being used to authenticate
        code (social_django.models.Code): the confirmation code used to confirm the email address
        partial_token (str): token used to resume a halted pipeline
    """
    confirm_base = strategy.build_absolute_uri(reverse("register-confirm"))
    confirmation_url = "{}?verification_code={}&partial_token={}&backend=email".format(
        confirm_base,
        quote_plus(code.code),
        quote_plus(partial_token),
    )
    user_context = api.context_for_user(
        extra_context={"confirmation_url": confirmation_url}
    )
    api.send_message(
        api.message_for_recipient(code.email, user_context, EMAIL_VERIFICATION)
    )
def send_verify_email_change_email(request, change_request):
    """
    Sends a verification email for a user email change

    Args:
        request (django.http.Request): the http request we're sending this email for
        change_request (ChangeEmailRequest): the change request to send the confirmation for
    """
    confirmation_url = "{}?verification_code={}".format(
        request.build_absolute_uri(reverse("account-confirm-email-change")),
        quote_plus(change_request.code),
    )
    # The confirmation must go to the *new* address to prove ownership.
    recipients = [
        (
            change_request.new_email,
            api.context_for_user(extra_context={"confirmation_url": confirmation_url}),
        )
    ]
    messages = api.messages_for_recipients(recipients, EMAIL_CHANGE_EMAIL)
    api.send_messages(list(messages))
| {
"repo_name": "mitodl/bootcamp-ecommerce",
"path": "mail/v2/verification_api.py",
"copies": "1",
"size": "2013",
"license": "bsd-3-clause",
"hash": 7293972439432969000,
"line_mean": 31.4677419355,
"line_max": 97,
"alpha_frac": 0.6204669647,
"autogenerated": false,
"ratio": 4.319742489270387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010619029346781701,
"num_lines": 62
} |
'''API for FHEM homeautomation server, supporting telnet or HTTP/HTTPS connections with authentication and CSRF-token support.'''
import datetime
import json
import logging
import re
import socket
import errno
import ssl
import threading
import time
try:
# Python 3.x
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import URLError
from urllib.request import HTTPSHandler
from urllib.request import HTTPPasswordMgrWithDefaultRealm
from urllib.request import HTTPBasicAuthHandler
from urllib.request import build_opener
from urllib.request import install_opener
except ImportError:
# Python 2.x
from urllib import urlencode
from urllib2 import quote
from urllib2 import urlopen
from urllib2 import URLError
from urllib2 import HTTPSHandler
from urllib2 import HTTPPasswordMgrWithDefaultRealm
from urllib2 import HTTPBasicAuthHandler
from urllib2 import build_opener
from urllib2 import install_opener
# needs to be in sync with setup.py and documentation (conf.py, branch gh-pages)
__version__ = '0.6.5'
# create logger with 'python_fhem'
# logger = logging.getLogger(__name__)
class Fhem:
'''Connects to FHEM via socket communication with optional SSL and password
support'''
def __init__(self, server, port=7072,
use_ssl=False, protocol="telnet", username="", password="", csrf=True,
cafile="", loglevel=1):
'''
Instantiate connector object.
:param server: address of FHEM server
:param port: telnet/http(s) port of server
:param use_ssl: boolean for SSL (TLS) [https as protocol sets use_ssl=True]
:param protocol: 'telnet', 'http' or 'https'
:param username: username for http(s) basicAuth validation
:param password: (global) telnet or http(s) password
:param csrf: (http(s)) use csrf token (FHEM 5.8 and newer), default True
:param cafile: path to public certificate of your root authority, if left empty, https protocol will ignore certificate checks.
:param loglevel: deprecated, will be removed. Please use standard python logging API with logger 'Fhem'.
'''
self.log = logging.getLogger("Fhem")
validprots = ['http', 'https', 'telnet']
self.server = server
self.port = port
self.ssl = use_ssl
self.csrf = csrf
self.csrftoken = ''
self.username = username
self.password = password
self.loglevel = loglevel
self.connection = False
self.cafile = cafile
self.nolog = False
self.bsock = None
self.sock = None
self.https_handler = None
# Set LogLevel
# self.set_loglevel(loglevel)
# Check if protocol is supported
if protocol in validprots:
self.protocol = protocol
else:
self.log.error("Invalid protocol: {}".format(protocol))
# Set authenticication values if#
# the protocol is http(s) or use_ssl is True
if protocol != "telnet":
tmp_protocol = "http"
if (protocol == "https") or (use_ssl is True):
self.ssl = True
tmp_protocol = "https"
self.baseurlauth = "{}://{}:{}/".format(tmp_protocol, server, port)
self.baseurltoken = "{}fhem".format(self.baseurlauth)
self.baseurl = "{}fhem?XHR=1&cmd=".format(self.baseurlauth)
self._install_opener()
    def connect(self):
        '''create socket connection to server (telnet protocol only)'''
        # For http(s), "connecting" only means fetching the CSRF token; no
        # persistent socket is held.
        if self.protocol == 'telnet':
            try:
                self.log.debug("Creating socket...")
                if self.ssl:
                    self.bsock = socket.socket(socket.AF_INET,
                                               socket.SOCK_STREAM)
                    # NOTE(review): ssl.wrap_socket is deprecated (removed in
                    # Python 3.12); SSLContext.wrap_socket is the replacement.
                    self.sock = ssl.wrap_socket(self.bsock)
                    self.log.info("Connecting to {}:{} with SSL (TLS)".format(
                        self.server, self.port))
                else:
                    self.sock = socket.socket(socket.AF_INET,
                                              socket.SOCK_STREAM)
                    self.log.info("Connecting to {}:{} without SSL".format(
                        self.server, self.port))
                self.sock.connect((self.server, self.port))
                self.connection = True
                self.log.info("Connected to {}:{}".format(
                    self.server, self.port))
            except socket.error:
                self.connection = False
                self.log.error("Failed to connect to {}:{}".format(
                    self.server, self.port))
                return
            if self.password != "":
                # FHEM's telnet frontend sends a password prompt first;
                # read it before answering with the password.
                # time.sleep(1.0)
                # self.send_cmd("\n")
                # prmpt = self._recv_nonblocking(4.0)
                prmpt = self.sock.recv(32000)
                self.log.debug("auth-prompt: {}".format(prmpt))
                # nolog suppresses logging of the password itself.
                self.nolog = True
                self.send_cmd(self.password)
                self.nolog = False
                time.sleep(0.1)
                try:
                    po1 = self.sock.recv(32000)
                    self.log.debug("auth-repl1: {}".format(po1))
                except socket.error:
                    self.log.error("Failed to recv auth reply")
                    self.connection = False
                    return
                self.log.info("Auth password sent to {}".format(self.server))
        else:  # http(s)
            if self.csrf:
                # An empty command returns the FHEM web page, which embeds
                # the current csrf_ token; scrape it out of the HTML.
                dat = self.send("")
                if dat is not None:
                    dat = dat.decode("UTF-8")
                    stp = dat.find("csrf_")
                    if stp != -1:
                        token = dat[stp:]
                        token = token[:token.find("'")]
                        self.csrftoken = token
                        self.connection = True
                    else:
                        self.log.error(
                            "CSRF token requested for server that doesn't know CSRF")
                else:
                    self.log.error(
                        "No valid answer on send when expecting csrf.")
            else:
                self.connection = True
def connected(self):
'''Returns True if socket/http(s) session is connected to server.'''
return self.connection
def set_loglevel(self, level):
'''Set logging level. [Deprecated, will be removed, use python logging.setLevel]
:param level: 0: critical, 1: errors, 2: info, 3: debug
'''
self.log.warning(
"Deprecation: please set logging levels using python's standard logging for logger 'Fhem'")
if level == 0:
self.log.setLevel(logging.CRITICAL)
elif level == 1:
self.log.setLevel(logging.ERROR)
elif level == 2:
self.log.setLevel(logging.INFO)
elif level == 3:
self.log.setLevel(logging.DEBUG)
def close(self):
'''Closes socket connection. (telnet only)'''
if self.protocol == 'telnet':
if self.connected():
time.sleep(0.2)
self.sock.close()
self.connection = False
self.log.info("Disconnected from fhem-server")
else:
self.log.error("Cannot disconnect, not connected")
else:
self.connection = False
    def _install_opener(self):
        """Build and globally install an urllib opener for http(s) access.

        Combines optional HTTP basic-auth (when a username is set) with an
        HTTPS handler whose certificate checking depends on ``cafile``:
        empty cafile disables verification entirely; otherwise the given
        root certificate is required.
        """
        self.opener = None
        if self.username != "":
            # Pre-register credentials for basic-auth challenges.
            self.password_mgr = HTTPPasswordMgrWithDefaultRealm()
            self.password_mgr.add_password(None, self.baseurlauth,
                                           self.username, self.password)
            self.auth_handler = HTTPBasicAuthHandler(self.password_mgr)
        if self.ssl is True:
            if self.cafile == "":
                # No CA file given: accept any certificate (self-signed setups).
                self.context = ssl.create_default_context()
                self.context.check_hostname = False
                self.context.verify_mode = ssl.CERT_NONE
            else:
                self.context = ssl.create_default_context()
                self.context.load_verify_locations(cafile=self.cafile)
                self.context.verify_mode = ssl.CERT_REQUIRED
            self.https_handler = HTTPSHandler(context=self.context)
            if self.username != "":
                self.opener = build_opener(self.https_handler,
                                           self.auth_handler)
            else:
                self.opener = build_opener(self.https_handler)
        else:
            if self.username != "":
                self.opener = build_opener(self.auth_handler)
        if self.opener is not None:
            self.log.debug("Setting up opener on: {}".format(self.baseurlauth))
            # NOTE: install_opener is process-global; it affects all urlopen
            # calls in this interpreter, not just this Fhem instance.
            install_opener(self.opener)
def send(self, buf, timeout=10):
'''Sends a buffer to server
:param buf: binary buffer'''
if len(buf) > 0:
if not self.connected():
self.log.debug("Not connected, trying to connect...")
self.connect()
if self.protocol == 'telnet':
if self.connected():
self.log.debug("Connected, sending...")
try:
self.sock.sendall(buf)
self.log.info("Sent msg, len={}".format(len(buf)))
return None
except OSError as err:
self.log.error(
"Failed to send msg, len={}. Exception raised: {}".format(len(buf), err))
self.connection = None
return None
else:
self.log.error(
"Failed to send msg, len={}. Not connected.".format(len(buf)))
return None
else: # HTTP(S)
paramdata = None
if self.csrf and len(buf) > 0:
if len(self.csrftoken) == 0:
self.log.error("CSRF token not available!")
self.connection = False
else:
datas = {'fwcsrf': self.csrftoken}
paramdata = urlencode(datas).encode('UTF-8')
try:
self.log.debug("Cmd: {}".format(buf))
cmd = quote(buf)
self.log.debug("Cmd-enc: {}".format(cmd))
if len(cmd) > 0:
ccmd = self.baseurl + cmd
else:
ccmd = self.baseurltoken
self.log.info("Request: {}".format(ccmd))
if ccmd.lower().startswith('http'):
ans = urlopen(ccmd, paramdata, timeout=timeout)
else:
self.log.error(
"Invalid URL {}, Failed to send msg, len={}, {}".format(ccmd, len(buf), err))
return None
data = ans.read()
return data
except URLError as err:
self.connection = False
self.log.error(
"Failed to send msg, len={}, {}".format(len(buf), err))
return None
except socket.timeout as err:
# Python 2.7 fix
self.log.error(
"Failed to send msg, len={}, {}".format(len(buf), err))
return None
def send_cmd(self, msg, timeout=10.0):
'''Sends a command to server.
:param msg: string with FHEM command, e.g. 'set lamp on'
:param timeout: timeout on send (sec).
'''
if not self.connected():
self.connect()
if not self.nolog:
self.log.debug("Sending: {}".format(msg))
if self.protocol == 'telnet':
if self.connection:
msg = "{}\n".format(msg)
cmd = msg.encode('utf-8')
return self.send(cmd)
else:
self.log.error(
"Failed to send msg, len={}. Not connected.".format(len(msg)))
return None
else:
return self.send(msg, timeout=timeout)
    def _recv_nonblocking(self, timeout=0.1):
        """Drain currently available data from the telnet socket.

        Temporarily switches the socket to non-blocking mode, reads an
        initial chunk, then keeps polling (sleeping ``timeout`` between
        reads) until a read returns nothing. The socket is restored to
        blocking mode before returning.

        :param timeout: pause between successive reads (sec)
        :return: accumulated bytes (may be empty)
        """
        if not self.connected():
            self.connect()
        data = b''
        if self.connection:
            self.sock.setblocking(False)
            data = b''
            try:
                data = self.sock.recv(32000)
            except socket.error as err:
                # Resource temporarily unavailable, operation did not complete are expected
                if err.errno != errno.EAGAIN and err.errno!= errno.ENOENT:
                    self.log.debug(
                        "Exception in non-blocking (1). Error: {}".format(err))
                    time.sleep(timeout)
            wok = 1
            # Keep reading while the previous read returned data.
            while len(data) > 0 and wok > 0:
                time.sleep(timeout)
                datai = b''
                try:
                    datai = self.sock.recv(32000)
                    if len(datai) == 0:
                        wok = 0
                    else:
                        data += datai
                except socket.error as err:
                    # Resource temporarily unavailable, operation did not complete are expected
                    if err.errno != errno.EAGAIN and err.errno!= errno.ENOENT:
                        self.log.debug(
                            "Exception in non-blocking (2). Error: {}".format(err))
                    wok = 0
            self.sock.setblocking(True)
        return data
    def send_recv_cmd(self, msg, timeout=0.1, blocking=False):
        '''
        Sends a command to the server and waits for an immediate reply.

        :param msg: FHEM command (e.g. 'set lamp on')
        :param timeout: waiting time for reply
        :param blocking: (telnet only) on True: use blocking socket communication (bool)
        :return: parsed JSON reply as dict; {} when empty/undecodable;
            None when the http(s) transport returned nothing
        '''
        data = b''
        if not self.connected():
            self.connect()
        if self.protocol == 'telnet':
            if self.connection:
                self.send_cmd(msg)
                time.sleep(timeout)
                data = []
                if blocking is True:
                    try:
                        # This causes failures if reply is larger!
                        data = self.sock.recv(64000)
                    except socket.error:
                        self.log.error("Failed to recv msg. {}".format(data))
                        return {}
                else:
                    data = self._recv_nonblocking(timeout)
                self.sock.setblocking(True)
            else:
                self.log.error(
                    "Failed to send msg, len={}. Not connected.".format(len(msg)))
        else:
            # http(s): the transport returns the full reply synchronously.
            data = self.send_cmd(msg)
        if data is None:
            return None
        if len(data) == 0:
            return {}
        try:
            sdata = data.decode('utf-8')
            jdata = json.loads(sdata)
        except Exception as err:
            self.log.error(
                "Failed to decode json, exception raised. {} {}".format(data, err))
            return {}
        # jsonlist2 replies carry the device list under 'Results'.
        if len(jdata[u'Results']) == 0:
            self.log.error("Query had no result.")
            return {}
        else:
            self.log.info("JSON answer received.")
            return jdata
def get_dev_state(self, dev, timeout=0.1):
self.log.warning(
"Deprecation: use get_device('device') instead of get_dev_state")
return self.get_device(dev, timeout=timeout, raw_result=True)
def get_dev_reading(self, dev, reading, timeout=0.1):
self.log.warning(
"Deprecation: use get_device_reading('device', 'reading') instead of get_dev_reading")
return self.get_device_reading(dev, reading, value_only=True, timeout=timeout)
def getDevReadings(self, dev, reading, timeout=0.1):
self.log.warning(
"Deprecation: use get_device_reading('device', ['reading']) instead of getDevReadings")
return self.get_device_reading(dev, timeout=timeout, value_only=True, raw_result=True)
def get_dev_readings(self, dev, readings, timeout=0.1):
self.log.warning(
"Deprecation: use get_device_reading('device', ['reading']) instead of get_dev_readings")
return self.get_device_reading(dev, readings, timeout=timeout, value_only=True, raw_result=True)
def get_dev_reading_time(self, dev, reading, timeout=0.1):
self.log.warning(
"Deprecation: use get_device_reading('device', 'reading', time_only=True) instead of get_dev_reading_time")
return self.get_device_reading(dev, reading, timeout=timeout, time_only=True)
def get_dev_readings_time(self, dev, readings, timeout=0.1):
self.log.warning(
"Deprecation: use get_device_reading('device', ['reading'], time_only=True) instead of get_dev_reading_time")
return self.get_device_reading(dev, readings, timeout=timeout, time_only=True)
def getFhemState(self, timeout=0.1):
self.log.warning(
"Deprecation: use get() without parameters instead of getFhemState")
return self.get(timeout=timeout, raw_result=True)
def get_fhem_state(self, timeout=0.1):
self.log.warning(
"Deprecation: use get() without parameters instead of get_fhem_state")
return self.get(timeout=timeout, raw_result=True)
@staticmethod
def _sand_down(value):
return value if len(value.values()) - 1 else list(value.values())[0]
@staticmethod
def _append_filter(name, value, compare, string, filter_list):
value_list = [value] if isinstance(value, str) else value
values = ",".join(value_list)
filter_list.append(string.format(name, compare, values))
    def _response_filter(self, response, arg, value, value_only=None, time_only=None):
        """Reduce a jsonlist2 response to the requested readings.

        :param response: decoded jsonlist2 reply (list of devices, or a dict
            with a 'Results' list)
        :param arg: positional args from the caller; at most one reading name
        :param value: which device section to read (e.g. 'Readings')
        :param value_only: on True: keep only each reading's 'Value'
        :param time_only: on True: keep only each reading's 'Time'
        :return: dict keyed by device name; single-entry dicts are collapsed
            to the bare value
        """
        if len(arg) > 2:
            self.log.error("Too many positional arguments")
            return {}
        result = {}
        for r in response if 'totalResultsReturned' not in response else response['Results']:
            # Normalize a single reading name into a one-element list.
            arg = [arg[0]] if len(arg) and isinstance(arg[0], str) else arg
            if value_only:
                result[r['Name']] = {k: v['Value'] for k, v in r[value].items() if
                                     'Value' in v and (not len(arg) or (len(arg) and k == arg[0]))}  # k in arg[0]))} fixes #14
            elif time_only:
                result[r['Name']] = {k: v['Time'] for k, v in r[value].items() if
                                     'Time' in v and (not len(arg) or (len(arg) and k == arg[0]))}  # k in arg[0]))}
            else:
                result[r['Name']] = {k: v for k, v in r[value].items() if
                                     (not len(arg) or (len(arg) and k == arg[0]))}  # k in arg[0]))}
            # Drop devices with no matching readings; collapse single entries.
            if not result[r['Name']]:
                result.pop(r['Name'], None)
            elif len(result[r['Name']].values()) == 1:
                result[r['Name']] = list(result[r['Name']].values())[0]
        return result
def _parse_filters(self, name, value, not_value, filter_list, case_sensitive):
compare = "=" if case_sensitive else "~"
if value:
self._append_filter(name, value, compare, "{}{}{}", filter_list)
elif not_value:
self._append_filter(name, not_value, compare,
"{}!{}{}", filter_list)
def _convert_data(self, response, k, v):
try:
test_type = unicode
except NameError:
test_type = str
if isinstance(v, test_type):
if re.findall("^[0-9]+$", v):
response[k] = int(v)
elif re.findall(r"^[0-9]+\.[0-9]+$", v):
response[k] = float(v)
elif re.findall("^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$", v):
response[k] = datetime.datetime.strptime(
v, '%Y-%m-%d %H:%M:%S')
if isinstance(v, dict):
self._parse_data_types(response[k])
if isinstance(v, list):
self._parse_data_types(response[k])
def _parse_data_types(self, response):
if isinstance(response, dict):
for k, v in response.items():
self._convert_data(response, k, v)
if isinstance(response, list):
for i, v in enumerate(response):
self._convert_data(response, i, v)
def get(self, name=None, state=None, group=None, room=None, device_type=None, not_name=None, not_state=None, not_group=None,
        not_room=None, not_device_type=None, case_sensitive=None, filters=None, timeout=0.1, blocking=False, raw_result=None):
    """
    Retrieve FHEM device data, optionally narrowed by the given filters.

    All filters are regular expressions (except full match), so escape where
    needed.  The same filters are accepted by every other get function.
    See https://FHEM.de/commandref.html#devspec for filter details.

    :param name: str or list, device name in FHEM
    :param state: str or list, state in FHEM
    :param group: str or list, filter FHEM groups
    :param room: str or list, filter FHEM room
    :param device_type: str or list, FHEM device type
    :param not_name: not name
    :param not_state: not state
    :param not_group: not group
    :param not_room: not room
    :param not_device_type: not device_type
    :param case_sensitive: bool, use case_sensitivity for all filter functions
    :param filters: dict of filters - key=attribute/internal/reading, value=regex for value, e.g. {"battery": "ok"}
    :param raw_result: On True: Don't convert to python types and send full FHEM response
    :param timeout: timeout for reply
    :param blocking: telnet socket mode, default blocking=False
    :return: dict of FHEM devices
    """
    if not self.connected():
        self.connect()
    if not self.connected():
        self.log.error("Failed to get fhem state. Not connected.")
        return {}
    filter_list = []
    self._parse_filters("NAME", name, not_name, filter_list, case_sensitive)
    self._parse_filters("STATE", state, not_state, filter_list, case_sensitive)
    self._parse_filters("group", group, not_group, filter_list, case_sensitive)
    self._parse_filters("room", room, not_room, filter_list, case_sensitive)
    self._parse_filters("TYPE", device_type, not_device_type, filter_list, case_sensitive)
    if filters:
        op = "=" if case_sensitive else "~"
        for key, value in filters.items():
            filter_list.append("{}{}{}".format(key, op, value))
    cmd = "jsonlist2 {}".format(":FILTER=".join(filter_list))
    # Only the telnet protocol supports a caller-chosen blocking mode.
    use_blocking = blocking if self.protocol == 'telnet' else False
    result = self.send_recv_cmd(cmd, blocking=use_blocking, timeout=timeout)
    if not result or raw_result:
        return result
    devices = result['Results']
    self._parse_data_types(devices)
    return devices
def get_states(self, **kwargs):
    """
    Fetch only the state of each device; accepts the filters of get().

    :param kwargs: Use keyword arguments from :py:meth:`Fhem.get` function
    :return: dict mapping device name to its state value
    """
    devices = self.get(**kwargs)
    if not devices:
        return devices
    states = {}
    for dev in devices:
        if 'state' in dev['Readings']:
            states[dev['Name']] = dev['Readings']['state']['Value']
    return states
def get_readings(self, *arg, **kwargs):
    """
    Return readings of devices; accepts the filters of get().

    :param arg: str, return only the named reading; all readings when omitted
    :param value_only: return only the value of each reading, not the timestamp
    :param time_only: return only the timestamp of each reading
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` function
    :return: dict of FHEM devices with readings
    """
    value_only = kwargs.pop('value_only', None)
    time_only = kwargs.pop('time_only', None)
    devices = self.get(**kwargs)
    return self._response_filter(devices, arg, 'Readings',
                                 value_only=value_only, time_only=time_only)
def get_attributes(self, *arg, **kwargs):
    """
    Return attributes of devices; accepts the filters of get().

    :param arg: str, return only the named attribute; all attributes when omitted
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` function
    :return: dict of FHEM devices with attributes
    """
    return self._response_filter(self.get(**kwargs), arg, 'Attributes')
def get_internals(self, *arg, **kwargs):
    """
    Return internals of devices; accepts the filters of get().

    :param arg: str, return only the named internal; all internals when omitted
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` function
    :return: dict of FHEM devices with internals
    """
    return self._response_filter(self.get(**kwargs), arg, 'Internals')
def get_device(self, device, **kwargs):
    """
    Fetch all data of one (or several) devices.

    :param device: str or list of device names
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` function
    :return: dict with data of the requested FHEM device(s)
    """
    return self.get(name=device, **kwargs)
def get_device_state(self, device, **kwargs):
    """
    Fetch the state of one device.

    :param device: str or list of device names
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` and :py:meth:`Fhem.get_states` functions
    :return: str, int or float when a single value was requested, else dict
    """
    return self._sand_down(self.get_states(name=device, **kwargs))
def get_device_reading(self, device, *arg, **kwargs):
    """
    Fetch reading(s) of one device.

    :param device: str or list of device names
    :param arg: str for one reading, list for several, empty for all readings
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` and :py:meth:`Fhem.get_readings` functions
    :return: str, int or float when a single value was requested, else dict
    """
    return self._sand_down(self.get_readings(*arg, name=device, **kwargs))
def get_device_attribute(self, device, *arg, **kwargs):
    """
    Fetch attribute(s) of one device.

    :param device: str or list of device names
    :param arg: str for one attribute, list for several, empty for all attributes
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` function
    :return: str, int or float when a single value was requested, else dict
    """
    return self._sand_down(self.get_attributes(*arg, name=device, **kwargs))
def get_device_internal(self, device, *arg, **kwargs):
    """
    Fetch internal value(s) of one device.

    :param device: str or list of device names
    :param arg: str for one internal value, list for several, empty for all
    :param kwargs: use keyword arguments from :py:meth:`Fhem.get` function
    :return: str, int or float when a single value was requested, else dict
    """
    return self._sand_down(self.get_internals(*arg, name=device, **kwargs))
class FhemEventQueue:
    '''Creates a thread that listens to FHEM events and dispatches them to
    a Python queue.'''

    def __init__(self, server, que, port=7072, protocol='telnet',
                 use_ssl=False, username="", password="", csrf=True, cafile="",
                 filterlist=None, timeout=0.1,
                 eventtimeout=60, serverregex=None, loglevel=1, raw_value=False):
        '''
        Construct an event queue object, FHEM events will be queued into the queue given at initialization.

        :param server: FHEM server address
        :param que: Python Queue object, receives FHEM events as dictionaries
        :param port: FHEM telnet port
        :param protocol: 'telnet', 'http' or 'https'. NOTE: for FhemEventQueue, currently only 'telnet' is supported!
        :param use_ssl: boolean for SSL (TLS)
        :param username: http(s) basicAuth username
        :param password: (global) telnet password or http(s) basicAuth password
        :param csrf: (http(s)) use csrf token (FHEM 5.8 and newer), default True (currently not used, since telnet-only)
        :param cafile: path to public certificate of your root authority, if left empty, https protocol will ignore certificate checks.
        :param filterlist: array of filter dictionaires [{"dev"="lamp1"}, {"dev"="livingtemp", "reading"="temperature"}]. A filter dictionary can contain devstate (type of FHEM device), dev (FHEM device name) and/or reading conditions. The filterlist works on client side.
        :param timeout: internal timeout for socket receive (should be short)
        :param eventtimeout: larger timeout for server keep-alive messages
        :param serverregex: FHEM regex to restrict event messages on server side.
        :param loglevel: deprecated, will be removed. Use standard python logging function for logger 'FhemEventQueue', old: 0: no log, 1: errors, 2: info, 3: debug
        :param raw_value: default False. On True, the value of a reading is not parsed for units, and returned as-is.
        '''
        self.log = logging.getLogger('FhemEventQueue')
        self.informcmd = "inform timer"
        self.timeout = timeout
        if serverregex is not None:
            self.informcmd += " " + serverregex
        if protocol != 'telnet':
            self.log.error("ONLY TELNET is currently supported for EventQueue")
            return
        self.fhem = Fhem(server=server, port=port, use_ssl=use_ssl, username=username,
                         password=password, cafile=cafile, loglevel=loglevel)
        self.fhem.connect()
        time.sleep(timeout)
        self.EventThread = threading.Thread(target=self._event_worker_thread,
                                            args=(que, filterlist,
                                                  timeout, eventtimeout, raw_value))
        # Thread.setDaemon() is deprecated; set the daemon attribute instead.
        self.EventThread.daemon = True
        self.EventThread.start()

    def set_loglevel(self, level):
        '''
        Set logging level, [Deprecated, will be removed, use python's logging.setLevel]

        :param level: 0: critical, 1: errors, 2: info, 3: debug
        '''
        self.log.warning(
            "Deprecation: please set logging levels using python's standard logging for logger 'FhemEventQueue'")
        if level == 0:
            self.log.setLevel(logging.CRITICAL)
        elif level == 1:
            self.log.setLevel(logging.ERROR)
        elif level == 2:
            self.log.setLevel(logging.INFO)
        elif level == 3:
            self.log.setLevel(logging.DEBUG)

    def _event_worker_thread(self, que, filterlist, timeout=0.1,
                             eventtimeout=120, raw_value=False):
        '''Background loop: keep the telnet connection alive, parse incoming
        FHEM event lines and put matching events on *que* as dictionaries.'''
        self.log.debug("FhemEventQueue worker thread starting...")
        if self.fhem.connected() is not True:
            self.log.warning("EventQueueThread: Fhem is not connected!")
        time.sleep(timeout)
        self.fhem.send_cmd(self.informcmd)
        data = ""
        first = True
        lastreceive = time.time()
        self.eventThreadActive = True
        while self.eventThreadActive is True:
            # Reconnect (and re-issue the inform command) whenever the link drops.
            while self.fhem.connected() is not True:
                self.fhem.connect()
                if self.fhem.connected():
                    time.sleep(timeout)
                    lastreceive = time.time()
                    self.fhem.send_cmd(self.informcmd)
                else:
                    self.log.warning("Fhem is not connected in EventQueue thread, retrying!")
                    time.sleep(5.0)
            if first is True:
                first = False
                self.log.debug("FhemEventQueue worker thread active.")
            time.sleep(timeout)
            # Keep-alive: re-issue "inform timer" if nothing arrived for a while.
            if time.time() - lastreceive > eventtimeout:
                self.log.debug("Event-timeout, refreshing INFORM TIMER")
                self.fhem.send_cmd(self.informcmd)
                if self.fhem.connected() is True:
                    lastreceive = time.time()
            if self.fhem.connected() is True:
                data = self.fhem._recv_nonblocking(timeout)
                lines = data.decode('utf-8').split('\n')
                for l in lines:
                    if len(l) > 0:
                        lastreceive = time.time()
                        li = l.split(' ')
                        # Event lines look like: "<date> <time> <devtype> <dev> <reading ...>"
                        if len(li) > 4:
                            dd = li[0].split('-')
                            tt = li[1].split(':')
                            try:
                                dt = datetime.datetime(int(dd[0]), int(dd[1]),
                                                       int(dd[2]), int(tt[0]),
                                                       int(tt[1]), int(tt[2]))
                            except (ValueError, IndexError):
                                # Narrowed from a bare except: int() raises
                                # ValueError, short date/time tokens IndexError.
                                self.log.debug("EventQueue: invalid date format in date={} time={}, event {} ignored".format(li[0], li[1], l))
                                continue
                            devtype = li[2]
                            dev = li[3]
                            # Remainder of the line is the raw reading text.
                            full_val = " ".join(li[4:])
                            vl = full_val.split(" ")
                            val = ''
                            unit = ''
                            if len(vl) > 0:
                                if len(vl[0]) > 0 and vl[0][-1] == ':':
                                    # "reading: value [unit]"
                                    read = vl[0][:-1]
                                    if len(vl) > 1:
                                        val = vl[1]
                                    if len(vl) > 2:
                                        unit = vl[2]
                                else:
                                    # No reading name given -> STATE event.
                                    read = 'STATE'
                                    if len(vl) > 0:
                                        val = vl[0]
                                    if len(vl) > 1:
                                        unit = vl[1]
                            # Client-side filtering: all conditions inside one
                            # filter dict must match; any matching dict accepts.
                            # NOTE(review): the constructor docstring mentions
                            # keys "devstate"/"dev", but the code matches
                            # "devtype"/"device" -- confirm the intended keys.
                            adQ = True
                            if filterlist is not None:
                                adQ = False
                                for f in filterlist:
                                    adQt = True
                                    for c in f:
                                        if c == 'devtype':
                                            if devtype != f[c]:
                                                adQt = False
                                        if c == 'device':
                                            if dev != f[c]:
                                                adQt = False
                                        if c == 'reading':
                                            if read != f[c]:
                                                adQt = False
                                    if adQt:
                                        adQ = True
                            if adQ:
                                if raw_value is False:
                                    ev = {
                                        'timestamp': dt,
                                        'devicetype': devtype,
                                        'device': dev,
                                        'reading': read,
                                        'value': val,
                                        'unit': unit
                                    }
                                else:
                                    ev = {
                                        'timestamp': dt,
                                        'devicetype': devtype,
                                        'device': dev,
                                        'reading': read,
                                        'value': full_val,
                                        'unit': None
                                    }
                                que.put(ev)
            time.sleep(timeout)
        self.fhem.close()
        self.log.debug("FhemEventQueue worker thread terminated.")
        return

    def close(self):
        '''Stop event thread and close socket.'''
        self.eventThreadActive = False
        time.sleep(0.5 + self.timeout)
| {
"repo_name": "domschl/python-fhem",
"path": "fhem/fhem/__init__.py",
"copies": "1",
"size": "37891",
"license": "mit",
"hash": -775439594242331900,
"line_mean": 43.0081300813,
"line_max": 272,
"alpha_frac": 0.5155577842,
"autogenerated": false,
"ratio": 4.451480263157895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467038047357895,
"avg_score": null,
"num_lines": null
} |
"""API for general search-related functionality"""
from base64 import urlsafe_b64encode
from collections import Counter, defaultdict
from elasticsearch_dsl import Q, Search
from elasticsearch_dsl.query import MoreLikeThis
from django.conf import settings
from channels.constants import (
CHANNEL_TYPE_PUBLIC,
CHANNEL_TYPE_RESTRICTED,
COMMENT_TYPE,
POST_TYPE,
ROLE_CONTRIBUTORS,
ROLE_MODERATORS,
)
from channels.models import ChannelGroupRole
from course_catalog.constants import PrivacyLevel
from course_catalog.models import FavoriteItem
from course_catalog.utils import get_list_items_by_resource
from open_discussions import features
from open_discussions.utils import extract_values
from search.connection import get_default_alias_name
from search.constants import (
ALIAS_ALL_INDICES,
GLOBAL_DOC_TYPE,
COURSE_TYPE,
USER_LIST_TYPE,
LEARNING_PATH_TYPE,
LEARNING_RESOURCE_TYPES,
PODCAST_TYPE,
PODCAST_EPISODE_TYPE,
)
# Fields sent to the MoreLikeThis query when finding posts related to a post
RELATED_POST_RELEVANT_FIELDS = ["plain_text", "post_title", "author_id", "channel_name"]
# Fields sent to the MoreLikeThis query when finding similar learning resources
SIMILAR_RESOURCE_RELEVANT_FIELDS = ["title", "short_description"]
def gen_post_id(reddit_obj_id):
    """
    Generate the Elasticsearch document id for a post

    Args:
        reddit_obj_id (int|str): The id of a reddit object as reported by PRAW

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"p_{reddit_obj_id}"
def gen_comment_id(reddit_obj_id):
    """
    Generate the Elasticsearch document id for a comment

    Args:
        reddit_obj_id (int|str): The id of a reddit object as reported by PRAW

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"c_{reddit_obj_id}"
def gen_profile_id(profile_id):
    """
    Generate the Elasticsearch document id for a profile

    Args:
        profile_id (str): The username of a Profile object

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"u_{profile_id}"
def gen_course_id(platform, course_id):
    """
    Generate the Elasticsearch document id for a course

    Args:
        platform (str): The platform of a Course object
        course_id (str): The course_id of a Course object

    Returns:
        str: The Elasticsearch document id for this object
    """
    # URL-safe base64 without padding keeps the id free of '/', '+' and '='
    encoded = urlsafe_b64encode(course_id.encode("utf-8"))
    safe_id = encoded.decode("utf-8").rstrip("=")
    return f"co_{platform}_{safe_id}"
def gen_content_file_id(key):
    """
    Generates the Elasticsearch document id for a ContentFile

    Args:
        key (str): The key of a ContentFile object

    Returns:
        str: The Elasticsearch document id for this object
    """
    # Fix: docstring previously documented a nonexistent `run_id` parameter.
    safe_key = urlsafe_b64encode(key.encode("utf-8")).decode("utf-8").rstrip("=")
    return "cf_{}".format(safe_key)
def gen_program_id(program_obj):
    """
    Generate the Elasticsearch document id for a Program

    Args:
        program_obj (Program): The Program object

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"program_{program_obj.id}"
def gen_user_list_id(user_list_obj):
    """
    Generate the Elasticsearch document id for a UserList

    Args:
        user_list_obj (UserList): The UserList object

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"user_list_{user_list_obj.id}"
def gen_video_id(video_obj):
    """
    Generate the Elasticsearch document id for a Video

    Args:
        video_obj (Video): The Video object

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"video_{video_obj.platform}_{video_obj.video_id}"
def gen_podcast_id(podcast_obj):
    """
    Generate the Elasticsearch document id for a Podcast

    Args:
        podcast_obj (Podcast): The Podcast object

    Returns:
        str: The Elasticsearch document id for this object
    """
    return f"podcast_{podcast_obj.id}"
def gen_podcast_episode_id(podcast_episode_obj):
    """
    Generates the Elasticsearch document id for a PodcastEpisode

    Args:
        podcast_episode_obj (PodcastEpisode): The PodcastEpisode object

    Returns:
        str: The Elasticsearch document id for this object
    """
    # Fix: docstring previously said "Podcast"; this handles PodcastEpisode.
    return "podcast_ep_{}".format(podcast_episode_obj.id)
def is_reddit_object_removed(reddit_obj):
    """
    Indicate whether a given reddit object is considered removed by moderators

    Args:
        reddit_obj (praw.models.reddit.submission.Submission, praw.models.reddit.comment.Comment):
            A PRAW post/'submission' or comment object

    Returns:
        bool: True if the object is considered removed, False otherwise
    """
    banned = bool(reddit_obj.banned_by)
    approved = bool(reddit_obj.approved_by)
    # Removed means: banned by a moderator and never re-approved afterwards.
    return banned and not approved
# pylint: disable=invalid-unary-operand-type
def _apply_general_query_filters(search, user):
    """
    Apply permission and deletion filters to a Search object.

    search (elasticsearch_dsl.Search): Search object
    user (User): The user executing the search

    Returns:
        elasticsearch_dsl.Search: Search object with filters applied
    """
    if user.is_anonymous:
        channel_names = []
    else:
        # Channels the logged-in user moderates or contributes to
        channel_names = sorted(
            list(
                ChannelGroupRole.objects.filter(
                    group__user=user, role__in=(ROLE_CONTRIBUTORS, ROLE_MODERATORS)
                )
                .values_list("channel__name", flat=True)
                .distinct()
            )
        )
    # Posts/comments must come from public or restricted channels; anything
    # that is not a post/comment passes this filter unconditionally.
    channels_filter = Q(
        "terms", channel_type=[CHANNEL_TYPE_PUBLIC, CHANNEL_TYPE_RESTRICTED]
    ) | ~Q("terms", object_type=[COMMENT_TYPE, POST_TYPE])
    # Additionally allow channels where the user has a contributor/moderator role
    if channel_names:
        channels_filter = channels_filter | Q("terms", channel_name=channel_names)
    # Exclude deleted/removed posts and comments; other object types pass.
    content_filter = (Q("term", deleted=False) & Q("term", removed=False)) | ~Q(
        "terms", object_type=[COMMENT_TYPE, POST_TYPE]
    )
    return search.filter(channels_filter).filter(content_filter)
# pylint: disable=invalid-unary-operand-type
def _apply_learning_query_filters(search, user):
    """
    Apply privacy and feature-flag filters for learning-resource searches.

    search (elasticsearch_dsl.Search): Search object
    user (User): The user executing the search

    Returns:
        elasticsearch_dsl.Search: Search object with filters applied
    """
    # Public lists are visible to everyone; non-list object types pass.
    user_list_filter = Q("term", privacy_level=PrivacyLevel.public.value) | ~Q(
        "terms", object_type=[USER_LIST_TYPE, LEARNING_PATH_TYPE]
    )
    # Logged-in users may additionally see their own lists.
    if not user.is_anonymous:
        user_list_filter = user_list_filter | Q("term", author=user.id)
    search = search.filter(user_list_filter)
    # Hide podcast content unless the feature flag is enabled.
    if not features.is_enabled(features.PODCAST_SEARCH):
        search = search.exclude(
            Q("terms", object_type=[PODCAST_TYPE, PODCAST_EPISODE_TYPE])
        )
    return search
def is_learning_query(query):
    """
    Return True if the query includes learning resource types, False otherwise

    Args:
        query (dict): The query sent to ElasticSearch

    Returns:
        bool: if the query includes learning resource types
    """
    requested_types = set(extract_values(query, "object_type"))
    return bool(requested_types & set(LEARNING_RESOURCE_TYPES))
def execute_search(*, user, query):
    """
    Execute a search based on the query

    Args:
        user (User): The user executing the search. Used to determine filters to enforce permissions.
        query (dict): The Elasticsearch query constructed in the frontend

    Returns:
        dict: The Elasticsearch response dict
    """
    search = Search(index=get_default_alias_name(ALIAS_ALL_INDICES))
    search.update_from_dict(query)
    filtered = _apply_general_query_filters(search, user)
    return _transform_search_results_suggest(filtered.execute().to_dict())
def execute_learn_search(*, user, query):
    """
    Execute a learning resources search based on the query

    Args:
        user (User): The user executing the search. Used to determine filters to enforce permissions.
        query (dict): The Elasticsearch query constructed in the frontend

    Returns:
        dict: The Elasticsearch response dict
    """
    search = Search(index=get_default_alias_name(ALIAS_ALL_INDICES))
    search.update_from_dict(query)
    filtered = _apply_learning_query_filters(search, user)
    return transform_results(filtered.execute().to_dict(), user)
def _transform_search_results_suggest(search_result):
    """
    Transform suggest results from elasticsearch

    Args:
        search_result (dict): The results from ElasticSearch

    Returns:
        dict: The Elasticsearch response dict with transformed suggestions
    """
    es_suggest = search_result.pop("suggest", {})
    total_hits = search_result.get("hits", {}).get("total", 0)
    # Suggestions are only useful when the result set is small enough.
    if total_hits > settings.ELASTICSEARCH_MAX_SUGGEST_HITS:
        search_result["suggest"] = []
        return search_result
    # Sum the scores per suggestion text, keeping only collated matches.
    scores = defaultdict(int)
    for option_list in extract_values(es_suggest, "options"):
        for option in option_list:
            if option["collate_match"] is True:
                scores[option["text"]] += option["score"]
    ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    search_result["suggest"] = [
        text for text, _ in ranked
    ][: settings.ELASTICSEARCH_MAX_SUGGEST_RESULTS]
    return search_result
# pylint: disable=too-many-branches
def transform_results(search_result, user):
    """
    Transform podcast and podcast episode, and userlist and learning path in aggregations

    Add 'is_favorite' and 'lists' fields to the '_source' attributes for learning resources.

    Args:
        search_result (dict): The results from ElasticSearch
        user (User): the user who performed the search

    Returns:
        dict: The Elasticsearch response dict with transformed aggregates and source values
    """
    # Unwrap each filtered aggregation ("agg_filter_<key>") into a plain "<key>" entry.
    for aggregation_key in [
        "type",
        "topics",
        "offered_by",
        "audience",
        "certification",
        "department_name",
        "level",
        "course_feature_tags",
        "resource_type",
    ]:
        if f"agg_filter_{aggregation_key}" in search_result.get("aggregations", {}):
            if aggregation_key == "level":
                # "level" is doubly nested and carries a per-level course count.
                levels = (
                    search_result.get("aggregations", {})
                    .get(f"agg_filter_{aggregation_key}", {})
                    .get("level", {})
                    .get("level", {})
                )
                if levels:
                    # Rebuild buckets using the course doc_count, dropping empty levels.
                    search_result["aggregations"]["level"] = {
                        "buckets": [
                            {
                                "key": bucket["key"],
                                "doc_count": bucket["courses"]["doc_count"],
                            }
                            for bucket in levels.get("buckets", [])
                            if bucket["courses"]["doc_count"] > 0
                        ]
                    }
            else:
                search_result["aggregations"][aggregation_key] = search_result[
                    "aggregations"
                ][f"agg_filter_{aggregation_key}"][aggregation_key]
            search_result["aggregations"].pop(f"agg_filter_{aggregation_key}")
    # Merge child type buckets into their parents:
    # podcast episodes count as podcasts, learning paths as user lists.
    types = search_result.get("aggregations", {}).get("type", {})
    if types:
        type_merges = dict(
            zip(
                (PODCAST_EPISODE_TYPE, LEARNING_PATH_TYPE),
                (PODCAST_TYPE, USER_LIST_TYPE),
            )
        )
        for child_type, parent_type in type_merges.items():
            child_type_bucket = None
            parent_type_bucket = None
            for type_bucket in search_result["aggregations"]["type"]["buckets"]:
                if type_bucket["key"] == child_type:
                    child_type_bucket = type_bucket
                elif type_bucket["key"] == parent_type:
                    parent_type_bucket = type_bucket
            if child_type_bucket and parent_type_bucket:
                # Both present: sum the counts and drop the child bucket.
                parent_type_bucket["doc_count"] = (
                    child_type_bucket["doc_count"] + parent_type_bucket["doc_count"]
                )
                search_result["aggregations"]["type"]["buckets"].remove(
                    child_type_bucket
                )
            elif child_type_bucket:
                # Only the child present: relabel it as the parent type.
                child_type_bucket["key"] = parent_type
        search_result["aggregations"]["type"]["buckets"].sort(
            key=lambda bucket: bucket["doc_count"], reverse=True
        )
    if not user.is_anonymous:
        # (content_type model name, object id) pairs the user has favorited
        favorites = (
            FavoriteItem.objects.select_related("content_type")
            .filter(user=user)
            .values_list("content_type__model", "object_id")
        )
        for hit in search_result.get("hits", {}).get("hits", []):
            object_type = hit["_source"]["object_type"]
            if object_type in LEARNING_RESOURCE_TYPES:
                if object_type == LEARNING_PATH_TYPE:
                    # Learning paths are looked up under the user list type.
                    object_type = USER_LIST_TYPE
                object_id = hit["_source"]["id"]
                hit["_source"]["is_favorite"] = (object_type, object_id) in favorites
                hit["_source"]["lists"] = get_list_items_by_resource(
                    user, object_type, object_id
                )
    search_result = _transform_search_results_suggest(search_result)
    return search_result
def find_related_documents(*, user, post_id):
    """
    Execute a "more like this" query to find posts that are related to a specific post

    Args:
        user (User): The user executing the search
        post_id (str): The id of the post that you want to find related posts for

    Returns:
        dict: The Elasticsearch response dict
    """
    index = get_default_alias_name(ALIAS_ALL_INDICES)
    search = _apply_general_query_filters(Search(index=index), user)
    related_query = MoreLikeThis(
        like={"_id": gen_post_id(post_id), "_type": GLOBAL_DOC_TYPE},
        fields=RELATED_POST_RELEVANT_FIELDS,
        min_term_freq=1,
        min_doc_freq=1,
    )
    search = search.query(related_query)
    # Limit results to the number indicated in settings
    search = search[0 : settings.OPEN_DISCUSSIONS_RELATED_POST_COUNT]
    return search.execute().to_dict()
def find_similar_resources(*, user, value_doc):
    """
    Execute a "more like this" query to find learning resources that are similar to the one provided.

    Args:
        user (User): The user executing the search
        value_doc (dict):
            a document representing the data fields we want to search with

    Returns:
        list of dict: hit dicts for similar learning resources
    """
    index = get_default_alias_name(ALIAS_ALL_INDICES)
    search = Search(index=index)
    search = _apply_general_query_filters(search, user)
    # Restrict results to learning resources only
    search = search.filter(Q("terms", object_type=LEARNING_RESOURCE_TYPES))
    search = search.query(
        MoreLikeThis(
            like={"doc": value_doc, "fields": list(value_doc.keys())},
            fields=SIMILAR_RESOURCE_RELEVANT_FIELDS,
            min_term_freq=settings.OPEN_RESOURCES_MIN_TERM_FREQ,
            min_doc_freq=settings.OPEN_RESOURCES_MIN_DOC_FREQ,
        )
    )
    response = search.execute()
    if not user.is_anonymous:
        # (content_type model name, object id) pairs the user has favorited
        favorites = (
            FavoriteItem.objects.select_related("content_type")
            .filter(user=user)
            .values_list("content_type__model", "object_id")
        )
    objects = []
    for hit in response.hits:
        # Skip the source document itself (same id AND same object_type).
        if getattr(hit, "id", False) and (
            hit["id"] != value_doc.get("id", None)
            or hit["object_type"] != value_doc.get("object_type", None)
        ):
            if user.is_anonymous:
                hit["is_favorite"] = False
                hit["lists"] = []
            else:
                object_type = hit["object_type"]
                if object_type in LEARNING_RESOURCE_TYPES:
                    if object_type == LEARNING_PATH_TYPE:
                        # Favorites store learning paths under the user list type.
                        object_type = USER_LIST_TYPE
                    object_id = hit["id"]
                    hit["is_favorite"] = (object_type, object_id) in favorites
                    hit["lists"] = get_list_items_by_resource(
                        user, object_type, object_id
                    )
            objects.append(hit.to_dict())
    # Cap the number of returned resources per settings
    return objects[0 : settings.OPEN_DISCUSSIONS_SIMILAR_RESOURCES_COUNT]
def get_similar_topics(value_doc, num_topics, min_term_freq, min_doc_freq):
    """
    Get a list of similar topics based on text values

    Args:
        value_doc (dict):
            a document representing the data fields we want to search with
        num_topics (int):
            number of topics to return
        min_term_freq (int):
            minimum times a term needs to show up in input
        min_doc_freq (int):
            minimum times a term needs to show up in docs

    Returns:
        list of str:
            list of topic values
    """
    index = get_default_alias_name(ALIAS_ALL_INDICES)
    search = Search(index=index).filter(Q("terms", object_type=[COURSE_TYPE]))
    search = search.query(
        MoreLikeThis(
            like=[{"doc": value_doc, "fields": list(value_doc.keys())}],
            fields=["course_id", "title", "short_description", "full_description"],
            min_term_freq=min_term_freq,
            min_doc_freq=min_doc_freq,
        )
    )
    # Only the topics field is needed from the matching courses.
    search = search.source(includes="topics")
    response = search.execute()
    topic_counts = Counter(
        topic for hit in response.hits for topic in hit.topics
    )
    # Most frequent topics first, capped at num_topics.
    return [topic for topic, _ in topic_counts.most_common(num_topics)]
| {
"repo_name": "mitodl/open-discussions",
"path": "search/api.py",
"copies": "1",
"size": "18625",
"license": "bsd-3-clause",
"hash": 1460504026180602600,
"line_mean": 31.3350694444,
"line_max": 101,
"alpha_frac": 0.6097181208,
"autogenerated": false,
"ratio": 4.027027027027027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005197148773019375,
"num_lines": 576
} |
"""API for Google Nest Device Access bound to Home Assistant OAuth."""
import datetime
from aiohttp import ClientSession
from google.oauth2.credentials import Credentials
from google_nest_sdm.auth import AbstractAuth
from homeassistant.helpers import config_entry_oauth2_flow
from .const import API_URL, OAUTH2_TOKEN, SDM_SCOPES
# See https://developers.google.com/nest/device-access/registration
class AsyncConfigEntryAuth(AbstractAuth):
    """Provide Google Nest Device Access authentication tied to an OAuth2 based config entry."""

    def __init__(
        self,
        websession: ClientSession,
        oauth_session: config_entry_oauth2_flow.OAuth2Session,
        client_id: str,
        client_secret: str,
    ):
        """Initialize Google Nest Device Access auth."""
        super().__init__(websession, API_URL)
        self._oauth_session = oauth_session
        self._client_id = client_id
        self._client_secret = client_secret

    async def async_get_access_token(self):
        """Return a valid access token for SDM API."""
        # Refresh through the config-entry OAuth session when expired.
        if not self._oauth_session.valid_token:
            await self._oauth_session.async_ensure_token_valid()
        return self._oauth_session.token["access_token"]

    async def async_get_creds(self):
        """Return an OAuth credential for Pub/Sub Subscriber."""
        # Home Assistant cannot refresh credentials on behalf of the google
        # pub/sub subscriber, so a full Credentials object is built with
        # enough information for the subscriber to refresh on its own.  The
        # token is deliberately not refreshed here even if expired, handing
        # that responsibility off entirely (failing loudly at startup if
        # something is wrong).
        token_data = self._oauth_session.token
        credentials = Credentials(
            token=token_data["access_token"],
            refresh_token=token_data["refresh_token"],
            token_uri=OAUTH2_TOKEN,
            client_id=self._client_id,
            client_secret=self._client_secret,
            scopes=SDM_SCOPES,
        )
        # NOTE(review): fromtimestamp produces a naive local-time datetime;
        # confirm the Credentials consumer expects that rather than UTC.
        credentials.expiry = datetime.datetime.fromtimestamp(token_data["expires_at"])
        return credentials
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/nest/api.py",
"copies": "6",
"size": "2192",
"license": "mit",
"hash": 7637580668833042000,
"line_mean": 38.1428571429,
"line_max": 96,
"alpha_frac": 0.6665145985,
"autogenerated": false,
"ratio": 4.199233716475096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001840942562592047,
"num_lines": 56
} |
""" API for heatcontrol """
import datetime, time
from tastypie import fields
from tastypie import resources
from tastypie.utils import timezone
from tastypie.authorization import ReadOnlyAuthorization, Authorization
from center.restapi import THSensorResource, THSensorResourceInstance
from application import restapi
from application.resource import ResourceMetaCommon
from heatcontrol import models
from tastypie.bundle import Bundle
import logging
logger = logging.getLogger(__name__)
class DayTypeResource(resources.ModelResource):
    """Read-only REST resource exposing DayType models, filterable by exact name."""

    class Meta(ResourceMetaCommon):
        queryset = models.DayType.objects.all()
        authorization = ReadOnlyAuthorization()
        filtering = {
            'name': 'exact',
        }


# Register a singleton instance with the application's REST API router.
DayTypeResourceInstance = DayTypeResource()
restapi.RestApi.register(DayTypeResourceInstance)
class ControlResource(resources.ModelResource):
    """REST resource for heating Control objects.

    Adds read-only derived fields (sensor name, live temperature, target
    temperature, PID control value and sensor data age) to the model fields.
    """

    sensor_id = fields.IntegerField('sensor_id', readonly=True)
    name = fields.CharField(readonly=True)
    temperature = fields.FloatField(readonly=True, null=True)
    target_temp = fields.FloatField(readonly=True, null=True)
    pidcontrol = fields.FloatField(readonly=True, null=True)
    age = fields.FloatField(null=True, readonly=True)

    class Meta(ResourceMetaCommon):
        # select_related avoids an extra query per row when dehydrating sensor data
        queryset = models.Control.objects.select_related('sensor').order_by('sensor__id')
        authorization = Authorization()
        filtering = {
            'sensor_id': 'exact',
        }

    def dehydrate_name(self, bundle):
        # Expose the related sensor's name as this resource's 'name' field.
        return bundle.obj.sensor.name

    def dehydrate_target_temp(self, bundle):
        return bundle.obj.get_target_temp()

    def dehydrate_age(self, bundle):
        # Seconds since the sensor last reported, or None if it never has.
        if bundle.obj.sensor.last_tsf:
            return time.time() - bundle.obj.sensor.last_tsf
        return None

    def dehydrate(self, bundle):
        # Pull live Temperature/pidcontrol values from the sensor's cache, if any.
        c = bundle.obj.sensor.get_cache()
        if c:
            bundle.data['temperature'] = c.get('Temperature')
            bundle.data['pidcontrol'] = c.get('pidcontrol')
        return bundle


ControlResourceInstance = ControlResource()
restapi.RestApi.register(ControlResourceInstance)
class ProfileResource(resources.ModelResource):
    """REST resource for per-daytype temperature profiles of a Control."""

    control = fields.ForeignKey(ControlResource, 'control')
    daytype = fields.ForeignKey(DayTypeResource, 'daytype')

    class Meta(ResourceMetaCommon):
        # profiles are served in chronological order of their start time
        queryset = models.Profile.objects.order_by('start')
        authorization = Authorization()
        filtering = {
            'control': 'exact',
            'daytype': 'exact',
        }
        ordering = ('start',)


ProfileResourceInstance = ProfileResource()
restapi.RestApi.register(ProfileResourceInstance)
class ScheduledOverrideResource(resources.ModelResource):
    """REST resource for scheduled temperature overrides on a Control."""

    control = fields.ForeignKey(ControlResource, 'control')

    class Meta(ResourceMetaCommon):
        queryset = models.ScheduledOverride.objects.order_by('start')
        authorization = Authorization()
        filtering = {
            'control': 'exact',
        }

    def get_object_list(self, request):
        """Return only overrides that have not yet ended."""
        return super(ScheduledOverrideResource, self).get_object_list(request).filter(end__gt=timezone.now())


ScheduledOverrideResourceInstance = ScheduledOverrideResource()
restapi.RestApi.register(ScheduledOverrideResourceInstance)
class InstantProfileResourceAuthorization(ReadOnlyAuthorization):
    """Read-only authorization that additionally allows detail updates (PATCH/PUT)."""

    def update_detail(self, object_list, bundle):
        # permit updating a single instant profile; list/create/delete stay read-only
        return True
class InstantProfileResource(resources.ModelResource):
    """REST resource for instant (immediately applied) profiles."""

    class Meta(ResourceMetaCommon):
        always_return_data = False
        queryset = models.InstantProfile.objects.order_by('id')
        # readable by anyone authorized; only detail updates are writable
        authorization = InstantProfileResourceAuthorization()


InstantProfileResourceInstance = InstantProfileResource()
restapi.RestApi.register(InstantProfileResourceInstance)
class CurrentDaytypeAuthorization(ReadOnlyAuthorization):
    """Read-only authorization that additionally allows detail updates (PATCH)."""

    def update_detail(self, object_list, bundle):
        # the current day type may be changed via PATCH on the detail endpoint
        return True
class CurrentDaytypeResource(resources.ModelResource):
    """Singleton-style resource exposing today's day type (GET/PATCH only)."""

    # name of today's DayType, serialized as a plain string
    daytype = fields.CharField()

    class Meta(ResourceMetaCommon):
        always_return_data = False
        queryset = models.Calendar.objects.all()
        authorization = CurrentDaytypeAuthorization()
        # no list endpoint; only the detail view is exposed
        list_allowed_methods = []
        detail_allowed_methods = ['get', 'patch']
        fields = ('daytype',)

    def dehydrate_daytype(self, bundle):
        """Serialize the related DayType as its name."""
        return bundle.obj.daytype.name

    def hydrate_daytype(self, bundle):
        """Resolve the posted day-type name back to a DayType instance."""
        bundle.obj.daytype = DayTypeResourceInstance.obj_get(bundle=Bundle(request=bundle.request), name=bundle.data['daytype'])
        return bundle

    def obj_get(self, bundle, **kwargs):
        # always operate on today's calendar entry, regardless of the requested pk
        return models.Calendar.objects.get(day=datetime.date.today())


CurrentDaytypeResourceInstance = CurrentDaytypeResource()
restapi.RestApi.register(CurrentDaytypeResourceInstance)
| {
"repo_name": "rkojedzinszky/thermo-center",
"path": "heatcontrol/restapi.py",
"copies": "1",
"size": "4774",
"license": "bsd-3-clause",
"hash": 6001425139055942000,
"line_mean": 34.1029411765,
"line_max": 128,
"alpha_frac": 0.7119815668,
"autogenerated": false,
"ratio": 4.258697591436218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00250144110720155,
"num_lines": 136
} |
"""API for Home Connect bound to HASS OAuth."""
from asyncio import run_coroutine_threadsafe
import logging
import homeconnect
from homeconnect.api import HomeConnectError
from homeassistant import config_entries, core
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, PERCENTAGE, TIME_SECONDS
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import (
BSH_ACTIVE_PROGRAM,
BSH_POWER_OFF,
BSH_POWER_STANDBY,
SIGNAL_UPDATE_ENTITIES,
)
_LOGGER = logging.getLogger(__name__)
class ConfigEntryAuth(homeconnect.HomeConnectAPI):
    """Provide Home Connect authentication tied to an OAuth2 based config entry."""

    def __init__(
        self,
        hass: core.HomeAssistant,
        config_entry: config_entries.ConfigEntry,
        implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
    ):
        """Initialize Home Connect Auth."""
        self.hass = hass
        self.config_entry = config_entry
        self.session = config_entry_oauth2_flow.OAuth2Session(
            hass, config_entry, implementation
        )
        super().__init__(self.session.token)
        # populated by get_devices()
        self.devices = []

    def refresh_tokens(self) -> dict:
        """Refresh and return new Home Connect tokens using Home Assistant OAuth2 session."""
        # run the async refresh on the HA event loop from this sync context
        run_coroutine_threadsafe(
            self.session.async_ensure_token_valid(), self.hass.loop
        ).result()
        return self.session.token

    def get_devices(self):
        """Return a list of {"device", "entities"} dicts for all known appliances."""
        appl = self.get_appliances()
        devices = []
        for app in appl:
            # map the appliance type reported by the API onto a wrapper class;
            # unknown types are logged and skipped
            if app.type == "Dryer":
                device = Dryer(self.hass, app)
            elif app.type == "Washer":
                device = Washer(self.hass, app)
            elif app.type == "Dishwasher":
                device = Dishwasher(self.hass, app)
            elif app.type == "FridgeFreezer":
                device = FridgeFreezer(self.hass, app)
            elif app.type == "Oven":
                device = Oven(self.hass, app)
            elif app.type == "CoffeeMaker":
                device = CoffeeMaker(self.hass, app)
            elif app.type == "Hood":
                device = Hood(self.hass, app)
            elif app.type == "Hob":
                device = Hob(self.hass, app)
            else:
                _LOGGER.warning("Appliance type %s not implemented", app.type)
                continue
            devices.append({"device": device, "entities": device.get_entity_info()})
        self.devices = devices
        return devices
class HomeConnectDevice:
    """Generic Home Connect device."""

    # for some devices, this is instead BSH_POWER_STANDBY
    # see https://developer.home-connect.com/docs/settings/power_state
    power_off_state = BSH_POWER_OFF

    def __init__(self, hass, appliance):
        """Initialize the device class."""
        self.hass = hass
        self.appliance = appliance

    def initialize(self):
        """Fetch the info needed to initialize the device."""
        # each fetch is best-effort: an offline appliance raises, and we still
        # want to finish initialization and start listening for events
        try:
            self.appliance.get_status()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch appliance status. Probably offline")
        try:
            self.appliance.get_settings()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch settings. Probably offline")
        try:
            program_active = self.appliance.get_programs_active()
        except (HomeConnectError, ValueError):
            _LOGGER.debug("Unable to fetch active programs. Probably offline")
            program_active = None
        if program_active and "key" in program_active:
            # record the running program so entities can reflect it immediately
            self.appliance.status[BSH_ACTIVE_PROGRAM] = {"value": program_active["key"]}
        self.appliance.listen_events(callback=self.event_callback)

    def event_callback(self, appliance):
        """Handle event."""
        _LOGGER.debug("Update triggered on %s", appliance.name)
        _LOGGER.debug(self.appliance.status)
        # notify all entities bound to this appliance to refresh their state
        dispatcher_send(self.hass, SIGNAL_UPDATE_ENTITIES, appliance.haId)
class DeviceWithPrograms(HomeConnectDevice):
    """Device with programs."""

    PROGRAMS = []

    def get_programs_available(self):
        """Get the available programs."""
        return self.PROGRAMS

    def get_program_switches(self):
        """Get a dictionary with info about program switches.

        There will be one switch for each program.
        """
        return [
            {"device": self, "program_name": program["name"]}
            for program in self.get_programs_available()
        ]

    def get_program_sensors(self):
        """Get a dictionary with info about program sensors.

        There will be one of the four types of sensors for each
        device.
        """
        sensor_specs = {
            "Remaining Program Time": (None, None, DEVICE_CLASS_TIMESTAMP, 1),
            "Duration": (TIME_SECONDS, "mdi:update", None, 1),
            "Program Progress": (PERCENTAGE, "mdi:progress-clock", None, 1),
        }
        descriptions = []
        for desc, (unit, icon, device_class, sign) in sensor_specs.items():
            descriptions.append(
                {
                    "device": self,
                    "desc": desc,
                    "unit": unit,
                    "key": "BSH.Common.Option.{}".format(desc.replace(" ", "")),
                    "icon": icon,
                    "device_class": device_class,
                    "sign": sign,
                }
            )
        return descriptions
class DeviceWithDoor(HomeConnectDevice):
    """Device that has a door sensor."""

    def get_door_entity(self):
        """Describe the door binary sensor of this device."""
        return {"device": self, "desc": "Door", "device_class": "door"}
class DeviceWithLight(HomeConnectDevice):
    """Device that has lighting."""

    def get_light_entity(self):
        """Describe the (non-ambient) light entity of this device."""
        return {"device": self, "desc": "Light", "ambient": None}
class DeviceWithAmbientLight(HomeConnectDevice):
    """Device that has ambient lighting."""

    def get_ambientlight_entity(self):
        """Describe the ambient light entity of this device."""
        return {"device": self, "desc": "AmbientLight", "ambient": True}
class Dryer(DeviceWithDoor, DeviceWithPrograms):
    """Dryer class."""

    PROGRAMS = [
        {"name": "LaundryCare.Dryer.Program.Cotton"},
        {"name": "LaundryCare.Dryer.Program.Synthetic"},
        {"name": "LaundryCare.Dryer.Program.Mix"},
        {"name": "LaundryCare.Dryer.Program.Blankets"},
        {"name": "LaundryCare.Dryer.Program.BusinessShirts"},
        {"name": "LaundryCare.Dryer.Program.DownFeathers"},
        {"name": "LaundryCare.Dryer.Program.Hygiene"},
        {"name": "LaundryCare.Dryer.Program.Jeans"},
        {"name": "LaundryCare.Dryer.Program.Outdoor"},
        {"name": "LaundryCare.Dryer.Program.SyntheticRefresh"},
        {"name": "LaundryCare.Dryer.Program.Towels"},
        {"name": "LaundryCare.Dryer.Program.Delicates"},
        {"name": "LaundryCare.Dryer.Program.Super40"},
        {"name": "LaundryCare.Dryer.Program.Shirts15"},
        {"name": "LaundryCare.Dryer.Program.Pillow"},
        {"name": "LaundryCare.Dryer.Program.AntiShrink"},
    ]

    def get_entity_info(self):
        """Describe the door sensor, program switches and program sensors."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Dishwasher(DeviceWithDoor, DeviceWithAmbientLight, DeviceWithPrograms):
    """Dishwasher class."""

    PROGRAMS = [
        {"name": "Dishcare.Dishwasher.Program.Auto1"},
        {"name": "Dishcare.Dishwasher.Program.Auto2"},
        {"name": "Dishcare.Dishwasher.Program.Auto3"},
        {"name": "Dishcare.Dishwasher.Program.Eco50"},
        {"name": "Dishcare.Dishwasher.Program.Quick45"},
        {"name": "Dishcare.Dishwasher.Program.Intensiv70"},
        {"name": "Dishcare.Dishwasher.Program.Normal65"},
        {"name": "Dishcare.Dishwasher.Program.Glas40"},
        {"name": "Dishcare.Dishwasher.Program.GlassCare"},
        {"name": "Dishcare.Dishwasher.Program.NightWash"},
        {"name": "Dishcare.Dishwasher.Program.Quick65"},
        {"name": "Dishcare.Dishwasher.Program.Normal45"},
        {"name": "Dishcare.Dishwasher.Program.Intensiv45"},
        {"name": "Dishcare.Dishwasher.Program.AutoHalfLoad"},
        {"name": "Dishcare.Dishwasher.Program.IntensivPower"},
        {"name": "Dishcare.Dishwasher.Program.MagicDaily"},
        {"name": "Dishcare.Dishwasher.Program.Super60"},
        {"name": "Dishcare.Dishwasher.Program.Kurz60"},
        {"name": "Dishcare.Dishwasher.Program.ExpressSparkle65"},
        {"name": "Dishcare.Dishwasher.Program.MachineCare"},
        {"name": "Dishcare.Dishwasher.Program.SteamFresh"},
        {"name": "Dishcare.Dishwasher.Program.MaximumCleaning"},
    ]

    def get_entity_info(self):
        """Describe the door sensor, program switches and program sensors."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Oven(DeviceWithDoor, DeviceWithPrograms):
    """Oven class."""

    PROGRAMS = [
        {"name": "Cooking.Oven.Program.HeatingMode.PreHeating"},
        {"name": "Cooking.Oven.Program.HeatingMode.HotAir"},
        {"name": "Cooking.Oven.Program.HeatingMode.TopBottomHeating"},
        {"name": "Cooking.Oven.Program.HeatingMode.PizzaSetting"},
        {"name": "Cooking.Oven.Program.Microwave.600Watt"},
    ]

    # ovens report standby rather than a full power-off state
    power_off_state = BSH_POWER_STANDBY

    def get_entity_info(self):
        """Describe the door sensor, program switches and program sensors."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Washer(DeviceWithDoor, DeviceWithPrograms):
    """Washer class."""

    PROGRAMS = [
        {"name": "LaundryCare.Washer.Program.Cotton"},
        {"name": "LaundryCare.Washer.Program.Cotton.CottonEco"},
        {"name": "LaundryCare.Washer.Program.EasyCare"},
        {"name": "LaundryCare.Washer.Program.Mix"},
        {"name": "LaundryCare.Washer.Program.DelicatesSilk"},
        {"name": "LaundryCare.Washer.Program.Wool"},
        {"name": "LaundryCare.Washer.Program.Sensitive"},
        {"name": "LaundryCare.Washer.Program.Auto30"},
        {"name": "LaundryCare.Washer.Program.Auto40"},
        {"name": "LaundryCare.Washer.Program.Auto60"},
        {"name": "LaundryCare.Washer.Program.Chiffon"},
        {"name": "LaundryCare.Washer.Program.Curtains"},
        {"name": "LaundryCare.Washer.Program.DarkWash"},
        {"name": "LaundryCare.Washer.Program.Dessous"},
        {"name": "LaundryCare.Washer.Program.Monsoon"},
        {"name": "LaundryCare.Washer.Program.Outdoor"},
        {"name": "LaundryCare.Washer.Program.PlushToy"},
        {"name": "LaundryCare.Washer.Program.ShirtsBlouses"},
        {"name": "LaundryCare.Washer.Program.SportFitness"},
        {"name": "LaundryCare.Washer.Program.Towels"},
        {"name": "LaundryCare.Washer.Program.WaterProof"},
    ]

    def get_entity_info(self):
        """Describe the door sensor, program switches and program sensors."""
        return {
            "binary_sensor": [self.get_door_entity()],
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class CoffeeMaker(DeviceWithPrograms):
    """Coffee maker class."""

    PROGRAMS = [
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Espresso"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.EspressoMacchiato"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Coffee"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Cappuccino"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.LatteMacchiato"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.CaffeLatte"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Americano"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.EspressoDoppio"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.FlatWhite"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Galao"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.MilkFroth"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.WarmMilk"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.Beverage.Ristretto"},
        {"name": "ConsumerProducts.CoffeeMaker.Program.CoffeeWorld.Cortado"},
    ]

    # coffee makers report standby rather than a full power-off state
    power_off_state = BSH_POWER_STANDBY

    def get_entity_info(self):
        """Describe the program switches and program sensors."""
        return {
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
class Hood(DeviceWithLight, DeviceWithAmbientLight, DeviceWithPrograms):
    """Hood class."""

    PROGRAMS = [
        {"name": "Cooking.Common.Program.Hood.Automatic"},
        {"name": "Cooking.Common.Program.Hood.Venting"},
        {"name": "Cooking.Common.Program.Hood.DelayedShutOff"},
    ]

    def get_entity_info(self):
        """Describe the lights, program switches and program sensors."""
        return {
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
            "light": [self.get_light_entity(), self.get_ambientlight_entity()],
        }
class FridgeFreezer(DeviceWithDoor):
    """Fridge/Freezer class."""

    def get_entity_info(self):
        """Describe the door binary sensor (the only entity of this device)."""
        return {"binary_sensor": [self.get_door_entity()]}
class Hob(DeviceWithPrograms):
    """Hob class."""

    PROGRAMS = [{"name": "Cooking.Hob.Program.PowerLevelMode"}]

    def get_entity_info(self):
        """Describe the program switches and program sensors."""
        return {
            "switch": self.get_program_switches(),
            "sensor": self.get_program_sensors(),
        }
| {
"repo_name": "tboyce021/home-assistant",
"path": "homeassistant/components/home_connect/api.py",
"copies": "2",
"size": "15203",
"license": "apache-2.0",
"hash": -5196381825392821000,
"line_mean": 36.8184079602,
"line_max": 93,
"alpha_frac": 0.6186936789,
"autogenerated": false,
"ratio": 3.6206239580852584,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239317636985258,
"avg_score": null,
"num_lines": null
} |
"""API for Honeywell Lyric bound to Home Assistant OAuth."""
import logging
from typing import cast
from aiohttp import BasicAuth, ClientSession
from aiolyric.client import LyricClient
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
class ConfigEntryLyricClient(LyricClient):
    """Provide Honeywell Lyric authentication tied to an OAuth2 based config entry."""

    def __init__(
        self,
        websession: ClientSession,
        oauth_session: config_entry_oauth2_flow.OAuth2Session,
    ):
        """Initialize Honeywell Lyric auth."""
        super().__init__(websession)
        self._oauth_session = oauth_session

    async def async_get_access_token(self):
        """Return a valid access token."""
        # refresh lazily: only hit the token endpoint when the token expired
        if not self._oauth_session.valid_token:
            await self._oauth_session.async_ensure_token_valid()
        return self._oauth_session.token["access_token"]
class LyricLocalOAuth2Implementation(
    config_entry_oauth2_flow.LocalOAuth2Implementation
):
    """Lyric Local OAuth2 implementation."""

    async def _token_request(self, data: dict) -> dict:
        """Make a token request.

        Sends client credentials both as HTTP Basic auth and as form
        fields, then returns the decoded JSON token payload.
        """
        session = async_get_clientsession(self.hass)
        data["client_id"] = self.client_id
        if self.client_secret is not None:
            data["client_secret"] = self.client_secret
        headers = {
            "Authorization": BasicAuth(self.client_id, self.client_secret).encode(),
            "Content-Type": "application/x-www-form-urlencoded",
        }
        resp = await session.post(self.token_url, headers=headers, data=data)
        # raise for non-2xx so callers see failed token exchanges
        resp.raise_for_status()
        return cast(dict, await resp.json())
| {
"repo_name": "w1ll1am23/home-assistant",
"path": "homeassistant/components/lyric/api.py",
"copies": "5",
"size": "1784",
"license": "apache-2.0",
"hash": 8183852116710474000,
"line_mean": 31.4363636364,
"line_max": 86,
"alpha_frac": 0.673206278,
"autogenerated": false,
"ratio": 3.9910514541387023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004228901591984756,
"num_lines": 55
} |
"""API for latubot
- sport (latu, luistelu)
- area (OULU, SYOTE)
- group (oulu, haukipudas) (previously city)
- name ("Iinatti 8km"...) (previously place)
"""
import logging
from datetime import datetime
from dateutil.tz import tzutc
from latubot.source import kunto
from latubot import time_utils
logger = logging.getLogger(__name__)
def sport_names():
    """Supported sport names (delegates to the kunto data source)."""
    return kunto.ALL_SPORTS
def area_names():
    """Supported area names (delegates to the kunto data source)."""
    return kunto.ALL_AREAS
def load(sport, area, since=None, fn=None):
    """Load updates."""
    # normalize and validate inputs (sport is checked before area, so an
    # invalid sport is reported first, matching the original behavior)
    sport_key = sport.lower()
    if sport_key not in sport_names():
        raise ValueError(f"Invalid sport {sport_key}")
    area_key = area.upper()
    if area_key not in area_names():
        raise ValueError(f"Invalid area {area_key}")
    # if new data sources are added, unify and combine data here
    updates = kunto.load(sport_key, area_key, fn=fn)
    if since:
        is_recent = _time_filter(since)
        return [item for item in updates if is_recent(item)]
    return list(updates)
def _time_filter(since):
    """Filter function to pass only items w/ date not older than since."""
    max_age = time_utils.since_to_delta(since)
    # the reference time is fixed once, when the filter is built
    reference = datetime.now(tzutc())

    def _is_recent(item):
        try:
            return item["date"] and reference - max_age < item["date"]
        except KeyError:
            # items without a date never pass
            return False

    return _is_recent
if __name__ == "__main__":
import sys
import json
logging.basicConfig(level=logging.DEBUG)
fn = sys.argv[1] if len(sys.argv) > 1 else None
d1 = load(sport="latu", area="OULU", since="7M", fn=fn)
logger.debug(f"Loaded {len(d1)} updates")
print(json.dumps(d1, cls=time_utils.DateTimeEncoder, indent=2))
| {
"repo_name": "suola/latubot",
"path": "latubot/source/api.py",
"copies": "1",
"size": "1646",
"license": "mit",
"hash": 4482847999241289000,
"line_mean": 21.8611111111,
"line_max": 74,
"alpha_frac": 0.6342648846,
"autogenerated": false,
"ratio": 3.318548387096774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9452813271696774,
"avg_score": 0,
"num_lines": 72
} |
"""API for Lokalise."""
from __future__ import annotations
from pprint import pprint
import requests
from .util import get_lokalise_token
def get_api(project_id, debug=False) -> Lokalise:
    """Get Lokalise API client; the token is resolved via get_lokalise_token()."""
    return Lokalise(project_id, get_lokalise_token(), debug)
class Lokalise:
    """Thin wrapper around the Lokalise v2 REST API.

    All calls are scoped to a single project and authenticated with an
    API token sent in the ``x-api-token`` header.
    """

    def __init__(self, project_id, token, debug):
        """Initialize Lokalise API.

        project_id: the Lokalise project to operate on.
        token: API token used for authentication.
        debug: when truthy, print each request and response.
        """
        self.project_id = project_id
        self.token = token
        self.debug = debug

    def request(self, method, path, data):
        """Make a request to the Lokalise API and return the decoded JSON body.

        Raises requests.HTTPError on a non-2xx response.
        """
        method = method.upper()
        kwargs = {"headers": {"x-api-token": self.token}}
        if method == "GET":
            # GET payloads go in the query string; everything else is JSON
            kwargs["params"] = data
        else:
            kwargs["json"] = data
        if self.debug:
            print(method, f"{self.project_id}/{path}", data)
        req = requests.request(
            method,
            f"https://api.lokalise.com/api2/projects/{self.project_id}/{path}",
            **kwargs,
        )
        req.raise_for_status()
        if self.debug:
            pprint(req.json())
            print()
        return req.json()

    def keys_list(self, params=None):
        """List keys.

        https://app.lokalise.com/api2docs/curl/#transition-list-all-keys-get
        """
        # default is None (not a mutable {}) so calls never share state
        return self.request("GET", "keys", params or {})["keys"]

    def keys_create(self, keys):
        """Create keys.

        https://app.lokalise.com/api2docs/curl/#transition-create-keys-post
        """
        return self.request("POST", "keys", {"keys": keys})["keys"]

    def keys_delete_multiple(self, key_ids):
        """Delete multiple keys.

        https://app.lokalise.com/api2docs/curl/#transition-delete-multiple-keys-delete
        """
        return self.request("DELETE", "keys", {"keys": key_ids})

    def keys_bulk_update(self, updates):
        """Update multiple keys.

        https://app.lokalise.com/api2docs/curl/#transition-bulk-update-put
        """
        return self.request("PUT", "keys", {"keys": updates})["keys"]

    def translations_list(self, params=None):
        """List translations.

        https://app.lokalise.com/api2docs/curl/#transition-list-all-translations-get
        """
        return self.request("GET", "translations", params or {})["translations"]

    def languages_list(self, params=None):
        """List languages.

        https://app.lokalise.com/api2docs/curl/#transition-list-project-languages-get
        """
        return self.request("GET", "languages", params or {})["languages"]
| {
"repo_name": "home-assistant/home-assistant",
"path": "script/translations/lokalise.py",
"copies": "7",
"size": "2569",
"license": "apache-2.0",
"hash": -8701533837666798000,
"line_mean": 27.5444444444,
"line_max": 86,
"alpha_frac": 0.5811599844,
"autogenerated": false,
"ratio": 3.7558479532163744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7837007937616375,
"avg_score": null,
"num_lines": null
} |
"""API for Lokalise."""
from pprint import pprint
import requests
from .util import get_lokalise_token
def get_api(project_id, debug=False) -> "Lokalise":
    """Get Lokalise API client; the token is resolved via get_lokalise_token()."""
    return Lokalise(project_id, get_lokalise_token(), debug)
class Lokalise:
    """Thin wrapper around the Lokalise v2 REST API.

    All calls are scoped to a single project and authenticated with an
    API token sent in the ``x-api-token`` header.
    """

    def __init__(self, project_id, token, debug):
        """Initialize Lokalise API.

        project_id: the Lokalise project to operate on.
        token: API token used for authentication.
        debug: when truthy, print each request and response.
        """
        self.project_id = project_id
        self.token = token
        self.debug = debug

    def request(self, method, path, data):
        """Make a request to the Lokalise API and return the decoded JSON body.

        Raises requests.HTTPError on a non-2xx response.
        """
        method = method.upper()
        kwargs = {"headers": {"x-api-token": self.token}}
        if method == "GET":
            # GET payloads go in the query string; everything else is JSON
            kwargs["params"] = data
        else:
            kwargs["json"] = data
        if self.debug:
            print(method, f"{self.project_id}/{path}", data)
        req = requests.request(
            method,
            f"https://api.lokalise.com/api2/projects/{self.project_id}/{path}",
            **kwargs,
        )
        req.raise_for_status()
        if self.debug:
            pprint(req.json())
            print()
        return req.json()

    def keys_list(self, params=None):
        """List keys.

        https://app.lokalise.com/api2docs/curl/#transition-list-all-keys-get
        """
        # default is None (not a mutable {}) so calls never share state
        return self.request("GET", "keys", params or {})["keys"]

    def keys_create(self, keys):
        """Create keys.

        https://app.lokalise.com/api2docs/curl/#transition-create-keys-post
        """
        return self.request("POST", "keys", {"keys": keys})["keys"]

    def keys_delete_multiple(self, key_ids):
        """Delete multiple keys.

        https://app.lokalise.com/api2docs/curl/#transition-delete-multiple-keys-delete
        """
        return self.request("DELETE", "keys", {"keys": key_ids})

    def keys_bulk_update(self, updates):
        """Update multiple keys.

        https://app.lokalise.com/api2docs/curl/#transition-bulk-update-put
        """
        return self.request("PUT", "keys", {"keys": updates})["keys"]

    def translations_list(self, params=None):
        """List translations.

        https://app.lokalise.com/api2docs/curl/#transition-list-all-translations-get
        """
        return self.request("GET", "translations", params or {})["translations"]

    def languages_list(self, params=None):
        """List languages.

        https://app.lokalise.com/api2docs/curl/#transition-list-project-languages-get
        """
        return self.request("GET", "languages", params or {})["languages"]
| {
"repo_name": "tboyce021/home-assistant",
"path": "script/translations/lokalise.py",
"copies": "14",
"size": "2535",
"license": "apache-2.0",
"hash": -4848184431465085000,
"line_mean": 27.8068181818,
"line_max": 86,
"alpha_frac": 0.5783037475,
"autogenerated": false,
"ratio": 3.7444608567208273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""API for managing channel memberships"""
import logging
import operator
from functools import reduce
from django.contrib.auth import get_user_model
from channels.api import get_admin_api
from channels.models import Channel
from profiles.filters import UserFilter
from profiles.models import Profile
log = logging.getLogger()
User = get_user_model()
def update_memberships_for_managed_channels(*, channel_ids=None, user_ids=None):
    """
    Update channels that are managed and have channel membership configs
    Args:
        channel_ids (list of int):
            optional list of channel ids to generate memberships for
        user_ids (list of int):
            optional list of user ids to filter to
    """
    # only managed channels with at least one membership config are eligible
    channels = Channel.objects.filter(
        membership_is_managed=True, channel_membership_configs__isnull=False
    )
    if channel_ids:
        channels = channels.filter(id__in=channel_ids)
    for channel in channels:
        update_memberships_for_managed_channel(channel, user_ids=user_ids)
def update_memberships_for_managed_channel(channel, *, user_ids=None):
    """
    Update the channel memberships for a given channel.
    If the channel is not managed, nothing happens.
    Args:
        channel (Channel):
            the channel to generate memberships for
        user_ids (list of int):
            optional list of user ids to filter to
    """
    if (
        not channel.membership_is_managed
        or not channel.channel_membership_configs.exists()
    ):
        log.debug(
            "update_managed_channel_membership() called for a channel"
            "that is not managed and/or has no channel membership configs: %s",
            channel.name,
        )
        return
    admin_api = get_admin_api()
    active_users = User.objects.filter(is_active=True)
    # create a list of user queries as generated by UserFilter
    filtered_user_queries = [
        UserFilter(config.query, queryset=active_users).qs
        for config in channel.channel_membership_configs.all()
    ]
    # bitwise OR the queries together so that users that match ANY of them are added to the channel
    users_in_channel = reduce(
        operator.or_,
        filtered_user_queries,
        # specify an initial empty query in case we hit a race condition and
        # `filtered_user_queries` ended up empty, otherwise `reduce()` would fail
        User.objects.none(),
    )
    # ensure that we're not about to select all users, because this is almost definitely not what we want
    # this can happen if the query configs didn't match ANY filter options
    # this is a bit of a hack to protect this, but it's sufficient for now
    # (the comparison is on the generated SQL text of the two querysets)
    if str(users_in_channel.query) == str(active_users.query):
        log.error(
            "Membership query configs for channel '%s' result in all active users being added, this is likely not desired",
            channel.name,
        )
        return
    # filter here, rather than earlier
    # this ensure the check above works in all cases
    if user_ids:
        users_in_channel = users_in_channel.filter(id__in=user_ids)
    # `distinct()` because the OR-ed queries may match the same user twice
    for user in users_in_channel.only("username").distinct():
        try:
            admin_api.add_contributor(user.username, channel.name)
            admin_api.add_subscriber(user.username, channel.name)
        except Profile.DoesNotExist:
            # best-effort: log and continue with the remaining users
            log.exception(
                "Channel %s membership update failed due to missing user profile: %s",
                channel.name,
                user.username,
            )
"repo_name": "mitodl/open-discussions",
"path": "channels/membership_api.py",
"copies": "1",
"size": "3588",
"license": "bsd-3-clause",
"hash": -1267214863484069600,
"line_mean": 34.5247524752,
"line_max": 123,
"alpha_frac": 0.6596989967,
"autogenerated": false,
"ratio": 4.3125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006290448735674416,
"num_lines": 101
} |
"""API for Neato Botvac bound to Home Assistant OAuth."""
from asyncio import run_coroutine_threadsafe
import pybotvac
from homeassistant import config_entries, core
from homeassistant.helpers import config_entry_oauth2_flow
class ConfigEntryAuth(pybotvac.OAuthSession):
    """Provide Neato Botvac authentication tied to an OAuth2 based config entry."""

    def __init__(
        self,
        hass: core.HomeAssistant,
        config_entry: config_entries.ConfigEntry,
        implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
    ):
        """Initialize Neato Botvac Auth."""
        self.hass = hass
        self.session = config_entry_oauth2_flow.OAuth2Session(
            hass, config_entry, implementation
        )
        super().__init__(self.session.token, vendor=pybotvac.Neato())

    def refresh_tokens(self) -> str:
        """Refresh and return new Neato Botvac tokens using Home Assistant OAuth2 session."""
        # run the async refresh on the HA event loop from this sync context
        run_coroutine_threadsafe(
            self.session.async_ensure_token_valid(), self.hass.loop
        ).result()
        return self.session.token["access_token"]
class NeatoImplementation(config_entry_oauth2_flow.LocalOAuth2Implementation):
    """Neato implementation of LocalOAuth2Implementation.

    We need this class because we have to add client_secret and scope to the authorization request.
    """

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {"client_secret": self.client_secret}

    async def async_generate_authorize_url(self, flow_id: str) -> str:
        """Generate a url for the user to authorize.

        We must make sure that the plus signs are not encoded.
        """
        # scope is appended manually so the '+' separators stay unescaped
        url = await super().async_generate_authorize_url(flow_id)
        return f"{url}&scope=public_profile+control_robots+maps"
| {
"repo_name": "turbokongen/home-assistant",
"path": "homeassistant/components/neato/api.py",
"copies": "5",
"size": "1890",
"license": "apache-2.0",
"hash": -488377590062082800,
"line_mean": 35.3461538462,
"line_max": 99,
"alpha_frac": 0.6846560847,
"autogenerated": false,
"ratio": 4.2,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006258280726365833,
"num_lines": 52
} |
"""API for Ondilo ICO bound to Home Assistant OAuth."""
from asyncio import run_coroutine_threadsafe
import logging
from ondilo import Ondilo
from homeassistant import config_entries, core
from homeassistant.helpers import config_entry_oauth2_flow
_LOGGER = logging.getLogger(__name__)
class OndiloClient(Ondilo):
    """Provide Ondilo ICO authentication tied to an OAuth2 based config entry."""

    def __init__(
        self,
        hass: core.HomeAssistant,
        config_entry: config_entries.ConfigEntry,
        implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
    ) -> None:
        """Initialize Ondilo ICO Auth."""
        self.hass = hass
        self.config_entry = config_entry
        self.session = config_entry_oauth2_flow.OAuth2Session(
            hass, config_entry, implementation
        )
        super().__init__(self.session.token)

    def refresh_tokens(self) -> dict:
        """Refresh and return new Ondilo ICO tokens using Home Assistant OAuth2 session."""
        # run the async refresh on the HA event loop from this sync context
        run_coroutine_threadsafe(
            self.session.async_ensure_token_valid(), self.hass.loop
        ).result()
        return self.session.token

    def get_all_pools_data(self) -> dict:
        """Fetch pools and add pool details and last measures to pool data."""
        pools = self.get_pools()
        for pool in pools:
            _LOGGER.debug(
                "Retrieving data for pool/spa: %s, id: %d", pool["name"], pool["id"]
            )
            # enrich each pool record in place with device and sensor data
            pool["ICO"] = self.get_ICO_details(pool["id"])
            pool["sensors"] = self.get_last_pool_measures(pool["id"])
            _LOGGER.debug("Retrieved the following sensors data: %s", pool["sensors"])
        return pools
| {
"repo_name": "kennedyshead/home-assistant",
"path": "homeassistant/components/ondilo_ico/api.py",
"copies": "2",
"size": "1719",
"license": "apache-2.0",
"hash": 9131152548661264000,
"line_mean": 33.38,
"line_max": 91,
"alpha_frac": 0.6335078534,
"autogenerated": false,
"ratio": 3.9699769053117784,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5603484758711779,
"avg_score": null,
"num_lines": null
} |
"""API for open discussions integration"""
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from open_discussions_api.client import OpenDiscussionsApi
from open_discussions_api.constants import ROLE_STAFF
from requests.exceptions import HTTPError
from rest_framework import status as statuses
from discussions.models import (
Channel,
ChannelProgram,
DiscussionUser,
)
from discussions.exceptions import (
DiscussionSyncException,
ChannelCreationException,
ChannelAlreadyExistsException,
ContributorSyncException,
DiscussionUserSyncException,
ModeratorSyncException,
SubscriberSyncException,
)
from discussions.utils import get_moderators_for_channel
from roles.models import Role
from roles.roles import Permissions
from search.api import adjust_search_for_percolator
from search.models import (
PercolateQuery,
PercolateQueryMembership,
)
log = logging.getLogger(__name__)
def get_staff_client():
    """
    Get an OpenDiscussionsApi client configured for user management.
    Raises:
        ImproperlyConfigured: if any required open-discussions setting is unset
    """
    def _required(setting_name):
        # Fail fast with a descriptive error when a setting is blank.
        value = getattr(settings, setting_name)
        if not value:
            raise ImproperlyConfigured('{} must be set'.format(setting_name))
        return value

    jwt_secret = _required('OPEN_DISCUSSIONS_JWT_SECRET')
    base_url = _required('OPEN_DISCUSSIONS_BASE_URL')
    api_username = _required('OPEN_DISCUSSIONS_API_USERNAME')
    return OpenDiscussionsApi(
        jwt_secret,
        base_url,
        api_username,
        roles=[ROLE_STAFF]
    )
def create_or_update_discussion_user(user_id, allow_email_optin=False):
    """
    Create or update a DiscussionUser record and sync it
    Args:
        user_id (int): user id of the user to sync
        allow_email_optin (bool): if True send email_optin in profile dict on users.update call
    Returns:
        DiscussionUser: The DiscussionUser connected to the user
    """
    discussion_user, _ = DiscussionUser.objects.get_or_create(user_id=user_id)
    with transaction.atomic():
        # Re-fetch under a row lock so concurrent syncs for the same user serialize.
        discussion_user = (
            DiscussionUser.objects.select_for_update()
            .select_related('user')
            .get(id=discussion_user.id)
        )
        # defer decision to create or update the profile until we have a lock
        if discussion_user.username is None:
            # No remote username yet: this user was never synced to open-discussions.
            create_discussion_user(discussion_user)
        else:
            # Feature flag defaults to enabled when unset.
            if settings.FEATURES.get('OPEN_DISCUSSIONS_USER_UPDATE', True):
                update_discussion_user(discussion_user, allow_email_optin=allow_email_optin)
    return discussion_user
def create_discussion_user(discussion_user):
    """
    Create the user's discussion user and profile on open-discussions.
    Args:
        discussion_user (discussions.models.DiscussionUser): discussion user to create
    Raises:
        DiscussionUserSyncException: if there was an error syncing the profile
    """
    profile = discussion_user.user.profile
    profile_payload = dict(
        name=profile.full_name,
        image=profile.image.url if profile.image else None,
        image_small=profile.image_small.url if profile.image_small else None,
        image_medium=profile.image_medium.url if profile.image_medium else None,
        email_optin=profile.email_optin
    )
    api = get_staff_client()
    result = api.users.create(
        profile.user.username,
        email=profile.user.email,
        profile=profile_payload
    )
    try:
        result.raise_for_status()
    except HTTPError as ex:
        raise DiscussionUserSyncException(
            "Error creating discussion user for {}".format(profile.user.username)
        ) from ex
    # Record the remote username and the profile revision we just pushed.
    discussion_user.username = result.json()['username']
    discussion_user.last_sync = profile.updated_on
    discussion_user.save()
def update_discussion_user(discussion_user, allow_email_optin=False):
    """
    Updates the user's discussion user profile
    Args:
        discussion_user (discussions.models.DiscussionUser): discussion user to update
        allow_email_optin (bool): if True send email_optin in profile dict on users.update call
    Raises:
        DiscussionUserSyncException: if there was an error syncing the profile
    """
    profile = discussion_user.user.profile
    # Skip the remote call when nothing changed since the last sync, unless the
    # caller explicitly wants to push the email_optin flag.
    if (
        not allow_email_optin and
        discussion_user.last_sync is not None and
        profile.updated_on <= discussion_user.last_sync
    ):
        return
    api = get_staff_client()
    profile_dict = dict(
        name=profile.full_name,
        image=profile.image.url if profile.image else None,
        image_small=profile.image_small.url if profile.image_small else None,
        image_medium=profile.image_medium.url if profile.image_medium else None,
    )
    # email_optin is only transmitted when explicitly allowed by the caller.
    if allow_email_optin:
        profile_dict["email_optin"] = profile.email_optin
    result = api.users.update(
        discussion_user.username,
        uid=profile.user.username,
        email=profile.user.email,
        profile=profile_dict
    )
    try:
        result.raise_for_status()
    except HTTPError as ex:
        raise DiscussionUserSyncException(
            "Error updating discussion user for {}".format(profile.user.username)
        ) from ex
    # Remember which profile revision was pushed so unchanged profiles skip syncs.
    discussion_user.last_sync = profile.updated_on
    discussion_user.save()
def add_contributor_to_channel(channel_name, discussion_username):
    """
    Add a user to a channel as a contributor.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    Raises:
        ContributorSyncException: if the open-discussions request fails
    """
    client = get_staff_client()
    try:
        response = client.channels.add_contributor(channel_name, discussion_username)
        response.raise_for_status()
    except HTTPError as ex:
        message = "Error adding contributor {user} to channel {channel}".format(
            user=discussion_username,
            channel=channel_name,
        )
        raise ContributorSyncException(message) from ex
def add_moderator_to_channel(channel_name, discussion_username):
    """
    Add a user to a channel as a moderator.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    Raises:
        ModeratorSyncException: if the open-discussions request fails
    """
    client = get_staff_client()
    try:
        response = client.channels.add_moderator(channel_name, discussion_username)
        response.raise_for_status()
    except HTTPError as ex:
        message = "Error adding moderator {user} to channel {channel}".format(
            user=discussion_username,
            channel=channel_name,
        )
        raise ModeratorSyncException(message) from ex
def remove_moderator_from_channel(channel_name, discussion_username):
    """
    Remove a user from a channel's moderators
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    Raises:
        ModeratorSyncException: if the open-discussions request fails
    """
    admin_client = get_staff_client()
    try:
        admin_client.channels.remove_moderator(channel_name, discussion_username).raise_for_status()
    except HTTPError as ex:
        # Fixed wording: this removes a moderator *from* a channel (was "to channel").
        raise ModeratorSyncException("Error removing moderator {user} from channel {channel}".format(
            user=discussion_username,
            channel=channel_name,
        )) from ex
def add_subscriber_to_channel(channel_name, discussion_username):
    """
    Subscribe a user to a channel.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    Raises:
        SubscriberSyncException: if the open-discussions request fails
    """
    client = get_staff_client()
    try:
        response = client.channels.add_subscriber(channel_name, discussion_username)
        response.raise_for_status()
    except HTTPError as ex:
        message = "Error adding subscriber {user} to channel {channel}".format(
            user=discussion_username,
            channel=channel_name,
        )
        raise SubscriberSyncException(message) from ex
def remove_contributor_from_channel(channel_name, discussion_username):
    """
    Remove a contributor from a channel.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    Raises:
        ContributorSyncException: if the open-discussions request fails
    """
    client = get_staff_client()
    try:
        response = client.channels.remove_contributor(channel_name, discussion_username)
        response.raise_for_status()
    except HTTPError as ex:
        message = "Unable to remove a contributor {user} from channel {channel}".format(
            user=discussion_username,
            channel=channel_name
        )
        raise ContributorSyncException(message) from ex
def remove_subscriber_from_channel(channel_name, discussion_username):
    """
    Unsubscribe a user from a channel.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    Raises:
        SubscriberSyncException: if the open-discussions request fails
    """
    client = get_staff_client()
    try:
        response = client.channels.remove_subscriber(channel_name, discussion_username)
        response.raise_for_status()
    except HTTPError as ex:
        message = "Unable to remove a subscriber {user} from channel {channel}".format(
            user=discussion_username,
            channel=channel_name,
        )
        raise SubscriberSyncException(message) from ex
def add_to_channel(channel_name, discussion_username):
    """
    Add a user to a channel as both contributor and subscriber.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    """
    # Contributor must come first: a private channel rejects subscribers
    # who are not already contributors.
    add_contributor_to_channel(channel_name, discussion_username)
    add_subscriber_to_channel(channel_name, discussion_username)
def remove_from_channel(channel_name, discussion_username):
    """
    Remove a user from a channel as both contributor and subscriber.
    Args:
        channel_name (str): An open-discussions channel
        discussion_username (str): The username used by open-discussions
    """
    # Subscriber must be removed first: on a private channel a non-contributor's
    # subscription status always reads as false, so the reverse order would
    # leave a stale subscription behind.
    remove_subscriber_from_channel(channel_name, discussion_username)
    remove_contributor_from_channel(channel_name, discussion_username)
def get_membership_ids_needing_sync():
    """
    Return ids of discussion-channel memberships that still need to be synced.
    Returns:
        QuerySet: lazily-evaluated database ids of memberships pending a sync
    """
    pending = PercolateQueryMembership.objects.filter(
        query__source_type=PercolateQuery.DISCUSSION_CHANNEL_TYPE,
        user__profile__isnull=False,
        needs_update=True,
    )
    # Members (is_member=True) first, most recently updated first.
    ordered = pending.order_by('-is_member', '-updated_on')
    return ordered.values_list("id", flat=True)
def sync_channel_memberships(membership_ids):
    """
    Sync outstanding channel memberships
    Args:
        membership_ids (iterable of int): iterable of membership ids to sync
    """
    # Channel -> program mapping is snapshotted once up front; see the race
    # note below for channels created after this point.
    program_ids_by_channel_id = {
        channel_program.channel_id: channel_program.program_id
        for channel_program in ChannelProgram.objects.all()
    }
    for membership_id in membership_ids:
        # One transaction (and row lock) per membership so a failure on one
        # does not roll back the others.
        with transaction.atomic():
            membership = (
                PercolateQueryMembership.objects
                .filter(id=membership_id)
                .prefetch_related('query__channels')
                .select_for_update()
                .first()
            )
            channel = membership.query.channels.first()
            # if we can't look up the program, skip this one
            # this covers a race condition where a PercolateQueryMembership is selected
            # for a channel that wasn't present when program_ids_by_channel_id was queried
            if channel is None or channel.id not in program_ids_by_channel_id:
                continue
            # if the user is a moderator, don't manipulate subscriptions
            if Role.objects.filter(
                role__in=Role.permission_to_roles[Permissions.CAN_CREATE_FORUMS],
                user_id=membership.user_id,
                program_id=program_ids_by_channel_id[channel.id],
            ).exists():
                # Mark handled without touching the remote channel.
                membership.needs_update = False
                membership.save()
            else:
                try:
                    # This guards against a race condition where the user's profile is in a celery task
                    # and hasn't yet actually been created
                    discussion_user = create_or_update_discussion_user(membership.user_id)
                    if membership.is_member:
                        add_to_channel(channel.name, discussion_user.username)
                    else:
                        remove_from_channel(channel.name, discussion_user.username)
                    membership.needs_update = False
                    membership.save()
                except DiscussionSyncException:
                    # Best-effort: log and continue with the remaining memberships;
                    # needs_update stays True so this one is retried later.
                    log.exception("Error updating channel membership")
def add_channel(
    original_search, title, name, description, channel_type, program_id, creator_id,
):  # pylint: disable=too-many-arguments
    """
    Add the channel and associated query
    Args:
        original_search (Search):
            The original search, which contains all back end filtering but no filtering specific to mail
            or for percolated queries.
        title (str): Title of the channel
        name (str): Name of the channel
        description (str): Description for the channel
        channel_type (str): Whether the channel is public or private
        program_id (int): The program id to connect the new channel to
        creator_id (int): The user id of the creator of a channel
    Returns:
        Channel: A new channel object
    Raises:
        ChannelAlreadyExistsException: if open-discussions reports the name is taken (HTTP 409)
        ChannelCreationException: if channel creation fails for any other reason
    """
    client = get_staff_client()
    try:
        client.channels.create(
            title=title,
            name=name,
            description=description,
            channel_type=channel_type,
        ).raise_for_status()
    except HTTPError as ex:
        if ex.response.status_code == statuses.HTTP_409_CONFLICT:
            raise ChannelAlreadyExistsException("Channel {} already exists".format(name)) from ex
        raise ChannelCreationException("Error creating channel {}".format(name)) from ex
    # Rewrite the search so it can be used as a percolator query.
    updated_search = adjust_search_for_percolator(original_search)
    with transaction.atomic():
        percolate_query = PercolateQuery.objects.create(
            original_query=original_search.to_dict(),
            query=updated_search.to_dict(),
            source_type=PercolateQuery.DISCUSSION_CHANNEL_TYPE,
        )
        channel = Channel.objects.create(
            query=percolate_query,
            name=name,
        )
        ChannelProgram.objects.create(
            channel=channel,
            program_id=program_id,
        )
    # Imported here, presumably to avoid a circular import at module load — confirm.
    from discussions import tasks as discussions_tasks
    discussions_tasks.add_moderators_to_channel.delay(channel.name)
    # populate memberships based on the enrollments we found now
    # subsequent matches will be picked up via indexing
    from search import tasks as search_tasks
    search_tasks.populate_query_memberships.delay(percolate_query.id)
    # The creator is added in add_moderators_to_channel but do it here also to prevent a race condition
    # where the user is redirected to the channel page before they have permission to access it.
    discussion_user = create_or_update_discussion_user(creator_id)
    add_and_subscribe_moderator(discussion_user.username, channel.name)
    return channel
def add_moderators_to_channel(channel_name):
    """
    Add every eligible moderator to a channel.
    Args:
        channel_name (str): The name of the channel
    """
    for moderator_id in get_moderators_for_channel(channel_name):
        # Make sure the moderator exists on open-discussions before adding them.
        moderator = create_or_update_discussion_user(moderator_id)
        add_and_subscribe_moderator(moderator.username, channel_name)
def add_and_subscribe_moderator(discussion_username, channel_name):
    """
    Make a user a moderator of a channel and subscribe them to it.
    Args:
        discussion_username (str): discussion username
        channel_name (str): The name of the channel
    """
    # Moderator first, then subscription.
    add_moderator_to_channel(channel_name, discussion_username)
    add_subscriber_to_channel(channel_name, discussion_username)
| {
"repo_name": "mitodl/micromasters",
"path": "discussions/api.py",
"copies": "1",
"size": "16514",
"license": "bsd-3-clause",
"hash": -8962814243801495000,
"line_mean": 34.2863247863,
"line_max": 109,
"alpha_frac": 0.6681603488,
"autogenerated": false,
"ratio": 4.232188621219887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5400348970019887,
"avg_score": null,
"num_lines": null
} |
"""API for performing and verifying device authentication."""
import binascii
import logging
import plistlib
from typing import Dict
from pyatv import exceptions
from pyatv.airplay.srp import LegacyCredentials, SRPAuthHandler
from pyatv.exceptions import AuthenticationError
from pyatv.support.http import HttpConnection, HttpResponse
_LOGGER = logging.getLogger(__name__)
_AIRPLAY_HEADERS = {
"User-Agent": "AirPlay/320.20",
"Connection": "keep-alive",
}
class AirPlayPairingProcedure:
    """Authenticate a device for AirPlay playback."""
    def __init__(self, http: HttpConnection, auth_handler: SRPAuthHandler) -> None:
        """Initialize a new AirPlayPairingProcedure."""
        self.http = http
        self.srp = auth_handler
    async def start_pairing(self) -> None:
        """Start the pairing process.
        This method will show the expected PIN on screen.
        """
        resp = await self.http.post("/pair-pin-start", headers=_AIRPLAY_HEADERS)
        if resp.code != 200:
            raise AuthenticationError("pair start failed")
    async def finish_pairing(self, username: str, password: str) -> LegacyCredentials:
        """Finish pairing process.
        A username (generated by new_credentials) and the PIN code shown on
        screen must be provided.
        """
        # Step 1
        self.srp.step1(username, password)
        resp = await self._send_plist(method="pin", user=username)
        # Response body may arrive as str or bytes; plistlib needs bytes.
        resp = plistlib.loads(
            resp.body if isinstance(resp.body, bytes) else resp.body.encode("utf-8")
        )
        if not isinstance(resp, dict):
            # Fixed typo in the error message ("exoected" -> "expected").
            raise exceptions.ProtocolError(f"expected dict, got {type(resp)}")
        # Step 2
        pub_key, key_proof = self.srp.step2(resp["pk"], resp["salt"])
        await self._send_plist(
            pk=binascii.unhexlify(pub_key), proof=binascii.unhexlify(key_proof)
        )
        # Step 3
        epk, tag = self.srp.step3()
        await self._send_plist(epk=epk, authTag=tag)
        return self.srp.credentials
    async def _send_plist(self, **kwargs) -> HttpResponse:
        """POST *kwargs* to /pair-setup-pin as a binary plist and return the response."""
        plist: Dict[str, str] = dict((str(k), v) for k, v in kwargs.items())
        headers = _AIRPLAY_HEADERS.copy()
        headers["Content-Type"] = "application/x-apple-binary-plist"
        # TODO: For some reason pylint does not find FMT_BINARY, why?
        # pylint: disable=no-member
        return await self.http.post(
            "/pair-setup-pin", body=plistlib.dumps(plist, fmt=plistlib.FMT_BINARY)
        )
# pylint: disable=too-few-public-methods
class AirPlayPairingVerifier:
    """Verify if a device is allowed to perform AirPlay playback."""
    def __init__(self, http: HttpConnection, auth_handler: SRPAuthHandler) -> None:
        """Initialize a new AirPlayPairingVerifier."""
        self.http = http
        self.srp = auth_handler
    async def verify_authed(self) -> bool:
        """Verify if device is allowed to use AirPlay."""
        resp = await self._send(self.srp.verify1())
        # First 32 bytes are treated as the device's public secret (per the
        # variable name); the remainder is passed through to verify2 unchanged.
        atv_public_secret = resp.body[0:32]
        data = resp.body[32:]  # TODO: what is this?
        await self._send(self.srp.verify2(atv_public_secret, data))
        # Reaching this point without an exception means verification succeeded.
        return True
    async def _send(self, data: bytes) -> HttpResponse:
        """POST raw verification *data* to /pair-verify and return the response."""
        headers = _AIRPLAY_HEADERS.copy()
        headers["Content-Type"] = "application/octet-stream"
        return await self.http.post("/pair-verify", headers=headers, body=data)
| {
"repo_name": "postlund/pyatv",
"path": "pyatv/airplay/auth.py",
"copies": "1",
"size": "3449",
"license": "mit",
"hash": 7954700564257851000,
"line_mean": 34.193877551,
"line_max": 86,
"alpha_frac": 0.6436648304,
"autogenerated": false,
"ratio": 3.769398907103825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49130637375038255,
"avg_score": null,
"num_lines": null
} |
"""API for persistent storage for the frontend."""
from functools import wraps
import voluptuous as vol
from homeassistant.components import websocket_api
DATA_STORAGE = 'frontend_storage'
STORAGE_VERSION_USER_DATA = 1
STORAGE_KEY_USER_DATA = 'frontend.user_data_{}'
async def async_setup_frontend_storage(hass):
    """Set up frontend storage."""
    # (stores, data): per-user Store objects and their loaded data, keyed by user id.
    hass.data[DATA_STORAGE] = ({}, {})
    register = hass.components.websocket_api.async_register_command
    register(websocket_set_user_data)
    register(websocket_get_user_data)
def with_store(orig_func):
    """Decorate a websocket handler with the calling user's store and data."""
    @wraps(orig_func)
    async def with_store_func(hass, connection, msg):
        """Resolve per-user storage, then invoke the wrapped handler."""
        stores, data = hass.data[DATA_STORAGE]
        user_id = connection.user.id
        if user_id not in stores:
            stores[user_id] = hass.helpers.storage.Store(
                STORAGE_VERSION_USER_DATA,
                STORAGE_KEY_USER_DATA.format(user_id)
            )
        store = stores[user_id]
        if user_id not in data:
            # First access for this user: load persisted data (or start empty).
            data[user_id] = await store.async_load() or {}
        await orig_func(hass, connection, msg, store, data[user_id])
    return with_store_func
@websocket_api.websocket_command({
    vol.Required('type'): 'frontend/set_user_data',
    vol.Required('key'): str,
    vol.Required('value'): vol.Any(bool, str, int, float, dict, list, None),
})
@websocket_api.async_response
@with_store
async def websocket_set_user_data(hass, connection, msg, store, data):
    """Persist one key/value pair of the user's frontend data.
    Async friendly.
    """
    key = msg['key']
    data[key] = msg['value']
    await store.async_save(data)
    connection.send_message(websocket_api.result_message(msg['id']))
@websocket_api.websocket_command({
    vol.Required('type'): 'frontend/get_user_data',
    vol.Optional('key'): str,
})
@websocket_api.async_response
@with_store
async def websocket_get_user_data(hass, connection, msg, store, data):
    """Return the user's frontend data (one key if requested, else everything).
    Async friendly.
    """
    if 'key' in msg:
        value = data.get(msg['key'])
    else:
        value = data
    connection.send_message(
        websocket_api.result_message(msg['id'], {'value': value})
    )
| {
"repo_name": "HydrelioxGitHub/home-assistant",
"path": "homeassistant/components/frontend/storage.py",
"copies": "10",
"size": "2368",
"license": "apache-2.0",
"hash": -6972776977316450000,
"line_mean": 27.5301204819,
"line_max": 76,
"alpha_frac": 0.6321790541,
"autogenerated": false,
"ratio": 3.682737169517885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 83
} |
"""API for persistent storage for the frontend."""
from functools import wraps
import voluptuous as vol
from homeassistant.components import websocket_api
DATA_STORAGE = "frontend_storage"
STORAGE_VERSION_USER_DATA = 1
STORAGE_KEY_USER_DATA = "frontend.user_data_{}"
async def async_setup_frontend_storage(hass):
    """Set up frontend storage."""
    # (stores, data): per-user Store objects and their loaded data, keyed by user id.
    hass.data[DATA_STORAGE] = ({}, {})
    for command in (websocket_set_user_data, websocket_get_user_data):
        hass.components.websocket_api.async_register_command(command)
def with_store(orig_func):
    """Decorate a websocket handler with the calling user's store and data."""

    @wraps(orig_func)
    async def with_store_func(hass, connection, msg):
        """Resolve per-user storage, then invoke the wrapped handler."""
        stores, data = hass.data[DATA_STORAGE]
        user_id = connection.user.id
        if user_id not in stores:
            stores[user_id] = hass.helpers.storage.Store(
                STORAGE_VERSION_USER_DATA,
                STORAGE_KEY_USER_DATA.format(user_id),
            )
        store = stores[user_id]
        if user_id not in data:
            # First access for this user: load persisted data (or start empty).
            data[user_id] = await store.async_load() or {}
        await orig_func(hass, connection, msg, store, data[user_id])

    return with_store_func
@websocket_api.websocket_command(
    {
        vol.Required("type"): "frontend/set_user_data",
        vol.Required("key"): str,
        vol.Required("value"): vol.Any(bool, str, int, float, dict, list, None),
    }
)
@websocket_api.async_response
@with_store
async def websocket_set_user_data(hass, connection, msg, store, data):
    """Persist one key/value pair of the user's frontend data.
    Async friendly.
    """
    key = msg["key"]
    data[key] = msg["value"]
    await store.async_save(data)
    connection.send_message(websocket_api.result_message(msg["id"]))
@websocket_api.websocket_command(
    {vol.Required("type"): "frontend/get_user_data", vol.Optional("key"): str}
)
@websocket_api.async_response
@with_store
async def websocket_get_user_data(hass, connection, msg, store, data):
    """Return the user's frontend data (one key if requested, else everything).
    Async friendly.
    """
    if "key" in msg:
        payload = {"value": data.get(msg["key"])}
    else:
        payload = {"value": data}
    connection.send_message(websocket_api.result_message(msg["id"], payload))
| {
"repo_name": "fbradyirl/home-assistant",
"path": "homeassistant/components/frontend/storage.py",
"copies": "1",
"size": "2298",
"license": "apache-2.0",
"hash": -8548808680167196000,
"line_mean": 29.2368421053,
"line_max": 81,
"alpha_frac": 0.6514360313,
"autogenerated": false,
"ratio": 3.6188976377952757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4770333669095276,
"avg_score": null,
"num_lines": null
} |
"""API for persistent storage for the frontend."""
from functools import wraps
import voluptuous as vol
from homeassistant.components import websocket_api
# mypy: allow-untyped-calls, allow-untyped-defs
DATA_STORAGE = "frontend_storage"
STORAGE_VERSION_USER_DATA = 1
STORAGE_KEY_USER_DATA = "frontend.user_data_{}"
async def async_setup_frontend_storage(hass):
    """Set up frontend storage."""
    # (stores, data): per-user Store objects and their loaded data, keyed by user id.
    hass.data[DATA_STORAGE] = ({}, {})
    register = hass.components.websocket_api.async_register_command
    register(websocket_set_user_data)
    register(websocket_get_user_data)
def with_store(orig_func):
    """Decorate function to provide per-user data and storage."""

    @wraps(orig_func)
    async def with_store_func(hass, connection, msg):
        """Fetch the user's store and cached data, then call the handler."""
        stores, data = hass.data[DATA_STORAGE]
        user_id = connection.user.id
        store = stores.get(user_id)
        if store is None:
            store = hass.helpers.storage.Store(
                STORAGE_VERSION_USER_DATA,
                STORAGE_KEY_USER_DATA.format(user_id),
            )
            stores[user_id] = store
        if user_id not in data:
            # Lazily load persisted data the first time this user is seen.
            data[user_id] = await store.async_load() or {}
        await orig_func(hass, connection, msg, store, data[user_id])

    return with_store_func
@websocket_api.websocket_command(
    {
        vol.Required("type"): "frontend/set_user_data",
        vol.Required("key"): str,
        vol.Required("value"): vol.Any(bool, str, int, float, dict, list, None),
    }
)
@websocket_api.async_response
@with_store
async def websocket_set_user_data(hass, connection, msg, store, data):
    """Store one key/value pair in the user's frontend data.
    Async friendly.
    """
    data[msg["key"]] = msg["value"]
    # Persist the whole per-user dict, then acknowledge the command.
    await store.async_save(data)
    connection.send_message(websocket_api.result_message(msg["id"]))
@websocket_api.websocket_command(
    {vol.Required("type"): "frontend/get_user_data", vol.Optional("key"): str}
)
@websocket_api.async_response
@with_store
async def websocket_get_user_data(hass, connection, msg, store, data):
    """Return the user's frontend data (one key if requested, else everything).
    Async friendly.
    """
    value = data.get(msg["key"]) if "key" in msg else data
    connection.send_message(
        websocket_api.result_message(msg["id"], {"value": value})
    )
| {
"repo_name": "joopert/home-assistant",
"path": "homeassistant/components/frontend/storage.py",
"copies": "3",
"size": "2348",
"license": "apache-2.0",
"hash": -7974227940992741000,
"line_mean": 28.7215189873,
"line_max": 81,
"alpha_frac": 0.6533219761,
"autogenerated": false,
"ratio": 3.5902140672782874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006212861003434226,
"num_lines": 79
} |
"""API for persistent storage for the frontend."""
from functools import wraps
import voluptuous as vol
from homeassistant.components import websocket_api
# mypy: allow-untyped-calls, allow-untyped-defs
DATA_STORAGE = "frontend_storage"
STORAGE_VERSION_USER_DATA = 1
async def async_setup_frontend_storage(hass):
    """Set up frontend storage."""
    # (stores, data): per-user Store objects and their loaded data, keyed by user id.
    hass.data[DATA_STORAGE] = ({}, {})
    for command in (websocket_set_user_data, websocket_get_user_data):
        hass.components.websocket_api.async_register_command(command)
def with_store(orig_func):
    """Decorate a websocket handler with the calling user's store and data."""

    @wraps(orig_func)
    async def with_store_func(hass, connection, msg):
        """Resolve per-user storage, then invoke the wrapped handler."""
        stores, data = hass.data[DATA_STORAGE]
        user_id = connection.user.id
        if user_id not in stores:
            stores[user_id] = hass.helpers.storage.Store(
                STORAGE_VERSION_USER_DATA, f"frontend.user_data_{user_id}"
            )
        store = stores[user_id]
        if user_id not in data:
            # First access for this user: load persisted data (or start empty).
            data[user_id] = await store.async_load() or {}
        await orig_func(hass, connection, msg, store, data[user_id])

    return with_store_func
@websocket_api.websocket_command(
    {
        vol.Required("type"): "frontend/set_user_data",
        vol.Required("key"): str,
        vol.Required("value"): vol.Any(bool, str, int, float, dict, list, None),
    }
)
@websocket_api.async_response
@with_store
async def websocket_set_user_data(hass, connection, msg, store, data):
    """Store one key/value pair in the user's frontend data.
    Async friendly.
    """
    key = msg["key"]
    data[key] = msg["value"]
    await store.async_save(data)
    connection.send_message(websocket_api.result_message(msg["id"]))
@websocket_api.websocket_command(
    {vol.Required("type"): "frontend/get_user_data", vol.Optional("key"): str}
)
@websocket_api.async_response
@with_store
async def websocket_get_user_data(hass, connection, msg, store, data):
    """Return the user's frontend data (one key if requested, else everything).
    Async friendly.
    """
    if "key" in msg:
        payload = {"value": data.get(msg["key"])}
    else:
        payload = {"value": data}
    connection.send_message(websocket_api.result_message(msg["id"], payload))
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/components/frontend/storage.py",
"copies": "21",
"size": "2277",
"license": "apache-2.0",
"hash": 5892880078299762000,
"line_mean": 28.5714285714,
"line_max": 85,
"alpha_frac": 0.6556873079,
"autogenerated": false,
"ratio": 3.5971563981042656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""API for plugin writers to adhere to.
Engine shall instantiate the drawingboard module.
"""
version = "1.0"
import os
plugSourcesPath = "C:\\Amit\\dev\\cpp\\of_v0.7.4_vs2010_release\\of_v0.7.4_vs2010_release\\apps\\myApps\\alpha-goldDust\\bin\\plug\\py\\sources" ##os.path.join("..", "sources")
###############################################################
### TODO. we should not explicitly set the paths here. They should be automatically
#### by done by the __init__ in sources
webplugSourcesPath = os.path.abspath(os.path.join(plugSourcesPath, "weby"))
#print("web plug path: {0}").format(os.path.abspath(webplugSourcesPath))
import sys; sys.path.insert(0, webplugSourcesPath)
import pprint
import imp
## TODO: we should provide a proxy minto module for non-embedded testing
try:
    import minto
    # Parenthesize the format call so the formatted string is what gets
    # printed; the previous `print("...").format(...)` only worked because
    # Python 2 parses it as one print-statement expression.
    print("minto api: {0}".format(minto.api_version()))
    minto.log_message("", version + " loaded")
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; any failure here means we are running outside the engine.
    print("You must be in testing mode since minto is not available")
pp = pprint.PrettyPrinter(indent=4)
# Plugin classes discovered by load_data_plugins()
module_plugins = []
# Instantiated plugin objects
plugin_objects = []
# key-value pair for pluginName to pluginObject
plugin_commands = {}
class DrawingBoardInitError(Exception):
    """Raised when code attempts to instantiate the static drawingboard class."""
    pass
class drawingboard(object):
    """static class drawingboard instantiated by the Engine"""
    # All methods below are API stubs the engine is expected to back with
    # real implementations; plugin authors program against this interface.
    def __init__(self):
        # The engine uses this class statically; direct instantiation is an error.
        raise DrawingBoardInitError("static class")
    # --- engine info / logging (stubs) ---
    def version(self):
        pass
    def log_message(self, sender, data):
        pass
    def log_error(self, sender, data):
        pass
    # --- market and stream discovery (stubs) ---
    def get_all_market_names(self):
        pass
    def get_all_market_streams(self, market = "default"):
        pass
    def get_schema_for_stream(self, stream):
        pass
    # --- command registry queries (stubs) ---
    def get_input_command(self, commandname):
        pass
    def get_output_command(self, commandname):
        pass
    def get_all_input_command(self):
        pass
    def get_all_output_command(self):
        pass
    def get_stats_for_command(self, commandname):
        pass
    # --- stream/provider management (stubs) ---
    def set_new_stream(self, stream, schema, provider):
        pass
    def set_provider(self, stream, provider):
        pass
    def register_provider(self, stream, filter, provider):
        pass
    def unregister_provider(self, stream, provider):
        pass
    def onData(self, stream, driver, data):
        pass
    # events - TODO
    # Document the methods, remove redundant, provide example usages
    # TODO - doc strings and group the apis based on usage. caller etc
class DataCommand(object):
is_command = True
def __init__(self, **kwargs):
print "DataCommand init called"
self.args = {}
def set_defaults(self):
self.set_display_name("TODO") #we should try getting a name from the subclass name. TODO.
self.set_status_string("initializing")
# TODO: intelligent about the png. Use class name to figure out the icon. If not found,
# then use the default png
self.set_icon("plugin.png")
self.set_command_name("noname")
self.set_display_name("noname")
self.set_initialized(False)
def set_command_name(self, name):
self.args['command_name'] = name
def get_command_name(self):
return self.args['command_name']
def set_display_name(self, name):
self.args['display_name'] = name
def get_display_name(self):
return self.args['display_name']
def get_status_string(self):
return self.args['status_string']
def set_status_string(self, status):
self.args['status_string'] = status
def set_icon(self, resource):
self.args['icon'] = resource
def get_icon(self):
return self.args['icon']
def init(self, **kwargs):
pass
# user defined plugin has to explicitly initialize a plugin when they know they are ready
def is_initialized(self):
return self.args['initialized']
def set_initialized(self, status):
self.args['initialized'] = status
    def load_resources(self, **kwargs):
        # Default implementation just applies the defaults; subclasses
        # may override to load their own resources as well.
        self.set_defaults()
    # ------------------------------------------------------------------
    # The methods below are no-op stubs that define the plugin contract.
    # Concrete plugin subclasses override the ones they support.
    # ------------------------------------------------------------------
    def _start(self):
        pass
    def _stop(self):
        pass
    def start(self, **kwargs):
        pass
    def stop(self):
        pass
    def is_stopped(self):
        pass
    def suspend(self):
        pass
    def is_suspended(self):
        pass
    def shutdown(self):
        pass
    def is_shutdown(self):
        pass
    # Batching / flush-interval hooks.
    def can_batch(self):
        pass
    def set_batchsize(self, size):
        pass
    def get_batchsize(self):
        pass
    def set_flush_interval(self, interval):
        pass
    def get_flush_interval(self):
        pass
    def subscribe_my_stats(self, callback):
        """If someone is interested in the DataCommand stats, they can provide a callback."""
        pass
    def on_new_stream(self, stream, schema):
        pass
    def set_port_hints(self):
        pass
    # should we make the get/set as properties?
    def set_input_port_count(self, count):
        pass
    def set_output_port_count(self, count):
        pass
    def get_input_port_count(self):
        pass
    def get_output_port_count(self):
        pass
    def set_output_port_schema(self, port, schema):
        pass
    def get_output_port_schema(self, port):
        pass
    def set_input_port_schema(self, port, schema):
        pass
    def get_input_port_schema(self, port):
        pass
    # Command submission / execution hooks.
    def submit_command(self, port, **commandArgs):
        pass
    def execute(self, **kwargs):
        pass
    def send_output(self, port, data):
        pass
    def send_output_async(self, port, data):
        pass
    def cancel_last_command(self):
        pass
    def is_command_canceled(self):
        pass
    def send_error_port(self, data):
        pass
    def send_admin_port(self, status):
        pass
    def has_admin_port(self):
        pass
    def check_connections(self):
        """typecheck schema for ports."""
        pass
    def get_params(self):
        pass
    def set_params(self, **kwParams):
        pass
    #event subscription todo
    def resume(self):
        pass
    def is_resumed(self):
        pass
def list_directory(directory, fileExt = [".py"]):
fileList = [f for f in os.listdir(directory)]
fileList = [os.path.splitext(f)[0] for f in fileList if os.path.splitext(f)[1] in fileExt]
print "fileList {0}".format(fileList)
return fileList
def load_data_plugins(directory, fileExt = [".py"]):
modNames = list_directory(directory, fileExt)
for moduleName in modNames:
print "Checking module {0}".format(moduleName)
if moduleName == "drawingboard":
print "Skipping module {0}".format(moduleName)
continue
m_info = imp.find_module(moduleName)
m = imp.load_module(moduleName, *m_info)
for name in dir(m):
t = m.__dict__[name]
try:
if t.__bases__:
try:
if t.is_command and name != "DataCommand":
minto.log_message("", "Found plugin class: "+name)
print "Found plugin class: {0}".format(name)
module_plugins.append(t)
except AttributeError:
pass
#print "class: {0} not a plugin class".format(name)
except AttributeError:
pass
def get_command_name(commandClass):
    """Return a generic command name derived from the class object itself."""
    generic_name = str(commandClass)
    return generic_name
def load_plugin_commands():
    """Instantiate every discovered plugin class and register it."""
    for klass in module_plugins:
        load_plugin_command(klass)
def load_plugin_command(klass):
    """Create one plugin instance, load its resources and register it in
    the module-level ``plugin_commands`` map keyed by its display name."""
    pluginObject = klass(dummy="dummy")
    pluginObject.load_resources(dummy="dummy")
    # TODO
    # we should first try to get the name from the plugin object and then use the generic routine
    name = pluginObject.get_display_name() #get_command_name(klass)
    print "command name: {0}".format(name)
    plugin_commands[name] = pluginObject
    # Notify the host ("minto") that the plugin is loaded.
    minto.on_plugin_loaded("db", name)
def create_data_commands():
    """Start every registered plugin and return the list of plugin objects.

    Uses dict.items() instead of the Python-2-only iteritems() so the
    function also works under Python 3; iteration behaviour is unchanged
    under Python 2.
    """
    cmds = []
    for (plugName, plugObject) in plugin_commands.items():
        create_data_command(plugName, plugObject)
        cmds.append(plugObject)
    return cmds
def create_data_command(pluginName, plugin):
    """Start a single plugin if it declared itself initialized, reporting
    progress and errors through the ``minto`` host object."""
    if plugin.is_initialized():
        minto.on_plugin_starting("", pluginName)
        plugin.start(en="english")
        minto.on_plugin_ready("", pluginName)
    else:
        minto.on_plugin_start_error(pluginName, "plugin not initialized")
def execute_data_command(pluginName, pluginCommand):
    """Dispatch *pluginCommand* to a registered plugin by name.

    NOTE(review): the actual execute() call is commented out below, so
    this currently only logs -- confirm whether that is intentional.
    """
    #global plugin_commands
    if pluginName in plugin_commands:
        print "executing for {0}: {1}".format(pluginName, pluginCommand)
        minto.log_message("", "executing: {0} with {1}".format(pluginName, pluginCommand))
        #plugin_commands[pluginName].execute(pluginCommand)
    else:
        minto.log_message("", "{0}: not found".format(pluginName))
# def create_data_commands():
# cmds = []
# for cls in module_plugins:
# pluginObject = cls(dummy="dummy")
# pluginObject.start(en="english")
# cmds.append(pluginObject)
# plugin_objects.append(pluginObject)
# return cmds
def testExecute():
    """Smoke test: run a canned search on every initialized plugin in
    the module-level ``plugin_objects`` list."""
    for pluginObject in plugin_objects:
        if pluginObject.is_initialized():
            pluginObject.execute(search = "now is the time")
def test_main():
    """Discover, load and start the plugins under ``webplugSourcesPath``.

    Bug fix: the original wrote ``print("...{0}").format(x)``, which under
    Python 3 applies ``.format`` to print's return value (None) and raises
    AttributeError.  Moving the format call inside print() preserves the
    Python 2 output exactly and also works under Python 3.
    """
    pp.pprint(sys.path)
    #webPlugPathTest = os.path.join(plugSourcesPath, "weby")
    print("webplugSourcesPath: {0}".format(webplugSourcesPath)) #webPlugPathTest)
    minto.log_message("", "webplugSourcesPath: "+webplugSourcesPath)
    load_data_plugins(webplugSourcesPath) #webPlugPathTest)
    print("completed identifying plugins")
    minto.log_message("", "completed identifying plugins")
    load_plugin_commands()
    cmds = create_data_commands()
    return cmds
def main():
    """Entry point: load plugins from the "weby" subdirectory of
    ``plugSourcesPath`` and remember the started plugin objects."""
    pp.pprint(sys.path)
    webPlugPathTest = os.path.join(plugSourcesPath, "weby")
    load_data_plugins(os.path.abspath(webPlugPathTest))
    cmds = create_data_commands()
    plugin_objects.extend(cmds)
# Script entry: load the plugins, then run the smoke test against them.
if __name__ == '__main__':
    main()
    testExecute()
| {
"repo_name": "decebel/dataAtom_alpha",
"path": "bin/plug/py/api/drawingboard.py",
"copies": "1",
"size": "9016",
"license": "apache-2.0",
"hash": -3267209283881317400,
"line_mean": 21.0980392157,
"line_max": 176,
"alpha_frac": 0.6937666371,
"autogenerated": false,
"ratio": 3.131642931573463,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9112173782406454,
"avg_score": 0.0426471572534018,
"num_lines": 408
} |
"""API for Python Jeeves libary.
:synopsis: Functions for creating sensitive values, labels, and policies.
.. moduleauthor:: Travis Hance <tjhance7@gmail.com>
.. moduleauthor:: Jean Yang <jeanyang@csail.mit.edu>
"""
from env.ConcreteCache import ConcreteCache
from env.VarEnv import VarEnv
from env.PolicyEnv import PolicyEnv
from env.PathVars import PathVars
from env.WritePolicyEnv import WritePolicyEnv
from smt.Z3 import Z3
from fast.AST import Facet, fexpr_cast, Constant, Var, Not, FExpr, Unassigned, FObject, jeevesState
import copy
def set_log_policies(filehandle):
    """
    Set policy logging.
    """
    jeevesState.set_log_policies(filehandle)
def log_policies():
    """
    Write policies to the policy files.
    """
    jeevesState.log_policies()
def log_counts(label_count):
    # Thin delegation to the global Jeeves state.
    jeevesState.log_counts(label_count)
def clear_policy_count():
    jeevesState.clear_policy_count()
def get_num_concretize():
    # Number of concretizations recorded so far by the global state.
    return jeevesState.num_concretize
def get_num_concretize_labels():
    return jeevesState.num_labels
def get_num_env_labels():
    # Number of labels with registered policies in the policy environment.
    return len(jeevesState.policyenv.policies.keys())
def init():
    """Initialization function for Jeeves library.
    You should always call this before you do anything Jeeves-y.
    """
    jeevesState.init()
# TODO this needs to be GC'ed somehow
def supports_jeeves(f):
    # Marker decorator: tags *f* as Jeeves-aware so jfun() can call it
    # directly instead of concretizing its arguments facet-by-facet.
    f.__jeeves = 0
    return f
@supports_jeeves
def mkLabel(varName = "", uniquify=True):
    """Makes a label to associate with policies and sensitive values.
    :param varName: Optional variable name (to help with debugging).
    :type varName: string
    :param uniquify: Passed through to the policy environment's mkLabel.
    :returns: Var - fresh label.
    """
    label = jeevesState.policyenv.mkLabel(varName, uniquify)
    # Remember every label ever created so getLabel() can find it by name.
    jeevesState.all_labels[label.name] = label
    return label
@supports_jeeves
def doesLabelExist(varName):
    # True iff a label with this name was created via mkLabel().
    return varName in jeevesState.all_labels
@supports_jeeves
def getLabel(varName):
    # Look up a previously created label by name (KeyError if unknown).
    return jeevesState.all_labels[varName]
@supports_jeeves
def restrict(varLabel, pred, use_empty_env=False):
    """Associates a policy with a label.
    :param varLabel: Label to associate with policy.
    :type varLabel: string
    :param pred: Policy: function taking output channel and returning Boolean result.
    :type pred: T -> bool, where T is the type of the output channel
    :param use_empty_env: Passed through to the policy environment's restrict.
    """
    jeevesState.policyenv.restrict(varLabel, pred, use_empty_env)
@supports_jeeves
def mkSensitive(varLabel, vHigh, vLow):
    """Creates a sensitive value with two facets.
    :param varLabel: Label to associate with sensitive value.
    :type varLabel: Var
    :param vHigh: High-confidentiality facet for viewers with restricted access.
    :type vHigh: T
    :param vLow: Low-confidentiality facet for other viewers.
    :type vLow: T
    """
    if isinstance(varLabel, Var):
        return Facet(varLabel, fexpr_cast(vHigh), fexpr_cast(vLow))
    else:
        # Bug fix: the original called JeevesLib.jif, but this module *is*
        # JeevesLib and never imports itself, so that branch raised
        # NameError at runtime.  Call the local jif() directly.
        return jif(varLabel, lambda: vHigh, lambda: vLow)
@supports_jeeves
def concretize(ctxt, v):
    """Projects out a single value to the viewer.
    :param ctxt: Output channel (viewer).
    :type ctxt: T, where policies have type T -> bool
    :param v: Value to concretize.
    :type v: FExpr
    :returns: The concrete (non-faceted) version of T under the policies in the environment.
    """
    pathvars = jeevesState.pathenv.getEnv()
    # Check to see if the value is in the cache.
    # NOTE(review): the concrete-value cache below is disabled (commented
    # out); every call goes straight to the policy environment.
    # cache_key = jeevesState.concretecache.get_cache_key(ctxt, v, pathvars)
    # cval = jeevesState.concretecache.cache_lookup(cache_key)
    # if cval is None:
    return jeevesState.policyenv.concretizeExp(ctxt, v, pathvars)
    # jeevesState.concretecache.cache_value(cache_key, cval)
    # return cval
@supports_jeeves
def jif(cond, thn_fn, els_fn):
    # Faceted "if": partially evaluate the condition under the current
    # path environment, then branch facet-by-facet in jif2.
    condTrans = fexpr_cast(cond).partialEval(jeevesState.pathenv.getEnv())
    if condTrans.type != bool:
        raise TypeError("jif must take a boolean as a condition")
    return jif2(condTrans, thn_fn, els_fn)
def jif2(cond, thn_fn, els_fn):
    # Recursive worker for jif(): cond is already partially evaluated, so
    # it must be a Constant or a Facet whose condition is a Var.
    if isinstance(cond, Constant):
        return thn_fn() if cond.v else els_fn()
    elif isinstance(cond, Facet):
        if not isinstance(cond.cond, Var):
            raise TypeError("facet conditional is of type %s" % cond.cond.__class__.__name__)
        # Evaluate both branches, each under the matching path assumption.
        with PositiveVariable(cond.cond):
            thn = jif2(cond.thn, thn_fn, els_fn)
        with NegativeVariable(cond.cond):
            els = jif2(cond.els, thn_fn, els_fn)
        return Facet(cond.cond, thn, els)
    else:
        raise TypeError("jif condition must be a constant or a var")
# supports short-circuiting
# without short-circuiting jif is unnecessary
# are there performance issues?
@supports_jeeves
def jand(l, r): # inputs are functions
    # Faceted short-circuit "and": r() is only evaluated on facets where
    # the left side is truthy.
    left = l()
    if not isinstance(left, FExpr):
        return left and r()
    return jif(left, r, lambda:left)
@supports_jeeves
def jor(l, r): # inputs are functions
    # Faceted short-circuit "or" (dual of jand).
    left = l()
    if not isinstance(left, FExpr):
        return left or r()
    return jif(left, lambda:left, r)
# this one is more straightforward
# just takes an expression
@supports_jeeves
def jnot(f):
    """Faceted negation: wrap faceted expressions in a symbolic Not,
    otherwise fall back to ordinary boolean negation."""
    return Not(f) if isinstance(f, FExpr) else (not f)
@supports_jeeves
def jassign(old, new, base_env=None):
    """Faceted assignment: build a value equal to *new* under the current
    path condition and *old* everywhere else.

    :param base_env: path variables already fixed in the enclosing scope
        (those need no facets).  The original used a mutable default
        ``{}``; it was only read, so replacing it with a None sentinel
        fixes the anti-pattern without changing behaviour.
    """
    if base_env is None:
        base_env = {}
    res = new
    for vs in jeevesState.pathenv.conditions:
        (var, val) = (vs.var, vs.val)
        if var.name not in base_env:
            # Orient the facet by the branch polarity of this condition.
            if val:
                res = Facet(var, res, old)
            else:
                res = Facet(var, old, res)
    if isinstance(res, FExpr):
        return res.partialEval({}, True)
    else:
        return res
'''
Caching.
'''
def start_caching():
    # Enable the concrete-value cache.
    jeevesState.concretecache.start_caching()
def stop_caching():
    jeevesState.concretecache.stop_caching()
def cache_size():
    return jeevesState.concretecache.cache_size()
def clear_cache():
    return jeevesState.concretecache.clear_cache()
def get_cache():
    # Expose the raw cache (primarily for debugging/tests).
    return jeevesState.concretecache.cache
def get_solverstate():
    return jeevesState.solverstate
'''
Early concretization optimization.
'''
def set_viewer(viewer):
    # Fix the output channel up front so later concretization can be
    # specialized against this viewer.
    jeevesState.set_viewer(viewer)
    jeevesState.reset_solverstate(viewer)
def clear_viewer():
    jeevesState.reset_viewer()
    jeevesState.clear_solverstate()
def get_viewer():
    return jeevesState.viewer
class PositiveVariable:
    # Context manager that assumes label *var* is True on the current path.
    def __init__(self, var):
        self.var = var
    def __enter__(self):
        jeevesState.pathenv.push(self.var, True)
    def __exit__(self, type, value, traceback):
        jeevesState.pathenv.pop()
class NegativeVariable:
    # Context manager that assumes label *var* is False on the current path.
    def __init__(self, var):
        self.var = var
    def __enter__(self):
        jeevesState.pathenv.push(self.var, False)
    def __exit__(self, type, value, traceback):
        jeevesState.pathenv.pop()
def liftTuple(t):
    """Turn a faceted tuple into a tuple of faceted components."""
    t = fexpr_cast(t)
    if isinstance(t, FObject):
        return t.v
    elif isinstance(t, Facet):
        # Lift both branches, then zip them back into per-slot facets.
        a = liftTuple(t.thn)
        b = liftTuple(t.els)
        return tuple([Facet(t.cond, a1, b1) for (a1, b1) in zip(a, b)])
    else:
        raise TypeError("bad use of liftTuple")
class Namespace:
    # Attribute bag whose assignments are faceted: setting an attribute
    # routes through jassign() so writes respect the current path
    # condition relative to the environment captured at construction.
    def __init__(self, kw, funcname):
        self.__dict__.update(kw)
        # Write bookkeeping fields via __dict__ to bypass __setattr__.
        self.__dict__['_jeeves_funcname'] = funcname
        self.__dict__['_jeeves_base_env'] = jeevesState.pathenv.getEnv()
    def __setattr__(self, attr, value):
        # Unset attributes read as Unassigned, so the facet's "old" side
        # is well-defined even on the first assignment.
        self.__dict__[attr] = jassign(self.__dict__.get(
            attr, Unassigned("variable '%s' in %s" % \
            (attr, self._jeeves_funcname)))
            , value, self.__dict__['_jeeves_base_env'])
@supports_jeeves
def jgetattr(obj, attr):
    # NOTE(review): when obj is an FExpr the getattr may raise
    # AttributeError, while the concrete branch substitutes Unassigned
    # instead -- confirm this asymmetry is intentional.
    if isinstance(obj, FExpr):
        return getattr(obj, attr)
    else:
        return getattr(obj, attr) if hasattr(obj, attr) else Unassigned("attribute '%s'" % attr)
@supports_jeeves
def jgetitem(obj, item):
    """Indexing that degrades to Unassigned instead of raising.

    Fixes two bugs in the original: the except clause listed KeyError
    twice (IndexError was clearly intended for sequence lookups), and the
    error message referenced the undefined name ``attr`` instead of
    ``item``, which raised NameError on the failure path.
    """
    try:
        return obj[item]
    except (KeyError, IndexError, TypeError) as e:
        return Unassigned("item '%s'" % item)
@supports_jeeves
def jmap(iterable, mapper):
    # Fast path: JList2 keeps per-element path environments, so it has a
    # dedicated mapper that avoids building a full facet tree.
    if isinstance(iterable, JList2):
        return jmap_jlist2(iterable, mapper)
    if isinstance(iterable, FObject) and isinstance(iterable.v, JList2):
        return jmap_jlist2(iterable.v, mapper)
    # General path: partially evaluate, then map recursively via jmap2.
    iterable = fexpr_cast(iterable).partialEval(jeevesState.pathenv.getEnv())
    return FObject(JList(jmap2(iterable, mapper)))
def jmap2(iterator, mapper):
    # Recursive worker for jmap(): descend through facets (splitting the
    # path environment per branch) until a concrete list/tuple is reached.
    if isinstance(iterator, Facet):
        # If the current path already fixes this condition, only one
        # branch is reachable.
        if jeevesState.pathenv.hasPosVar(iterator.cond):
            return jmap2(iterator.thn, mapper)
        if jeevesState.pathenv.hasNegVar(iterator.cond):
            return jmap2(iterator.els, mapper)
        with PositiveVariable(iterator.cond):
            thn = jmap2(iterator.thn, mapper)
        with NegativeVariable(iterator.cond):
            els = jmap2(iterator.els, mapper)
        return Facet(iterator.cond, thn, els)
    elif isinstance(iterator, FObject):
        return jmap2(iterator.v, mapper)
    elif isinstance(iterator, JList):
        return jmap2(iterator.l, mapper)
    elif isinstance(iterator, JList2):
        return jmap2(iterator.convert_to_jlist1().l, mapper)
    elif isinstance(iterator, list) or isinstance(iterator, tuple):
        # Base case: an ordinary sequence.
        return FObject([mapper(item) for item in iterator])
    else:
        # Fall back to the iterator protocol for anything else.
        return jmap2(iterator.__iter__(), mapper)
def jmap_jlist2(jlist2, mapper):
    """Map *mapper* over a JList2, running each element under the path
    environment that was stored with it at append time.

    Python 3 compatibility fix: dict.iteritems() -> items() and
    xrange() -> range(); under Python 2 both iterate identically here.

    NOTE(review): when an element's stored environment contradicts the
    current one the inner loop breaks, but the element is still appended
    below -- confirm whether the append should be skipped in that case.
    """
    ans = JList2([])
    env = jeevesState.pathenv.getEnv()
    for i, e in jlist2.l:
        popcount = 0
        for vname, vval in e.items():
            if vname not in env:
                v = getLabel(vname)
                jeevesState.pathenv.push(v, vval)
                popcount += 1
            elif env[vname] != vval:
                break
        ans.l.append((mapper(i), e))
        # Undo the temporary path assumptions for this element.
        for _ in range(popcount):
            jeevesState.pathenv.pop()
    return FObject(ans)
def facetMapper(facet, fn, wrapper=fexpr_cast):
    """
    Apply *fn* to every leaf of a facet tree, rebuilding the tree and
    wrapping each transformed leaf with *wrapper*.

    NOTE(review): implicitly returns None for leaf types other than
    Constant/FObject -- confirm callers never pass raw values.
    """
    if isinstance(facet, Facet):
        return Facet(facet.cond, facetMapper(facet.thn, fn, wrapper)
            , facetMapper(facet.els, fn, wrapper))
    elif isinstance(facet, Constant) or isinstance(facet, FObject):
        return wrapper(fn(facet.v))
class JList:
    # Faceted list: wraps a (possibly faceted) underlying list and makes
    # mutation respect the current path condition via jassign().
    def validate(self):
        # Sanity check: every leaf of the facet tree must be a real list.
        def foo(x):
            assert isinstance(x, list), 'thingy is ' + str(x.l.v)
            return x
        facetMapper(self.l, foo, lambda x : x)
    def __init__(self, l):
        # Wrap plain lists in FObject; faceted values pass through.
        self.l = l if isinstance(l, FExpr) else FObject(l)
        self.validate()
    def __getitem__(self, i):
        return self.l[i]
    def __setitem__(self, i, val):
        # Faceted write: only visible under the current path condition.
        self.l[i] = jassign(self.l[i], val)
    def __len__(self):
        return self.l.__len__()
    def __iter__(self):
        return self.l.__iter__()
    def append(self, val):
        l2 = facetMapper(self.l, list, FObject) #deep copy
        l2.append(val)
        # Make the append itself faceted on the current path condition.
        self.l = jassign(self.l, l2)
        self.validate()
    def prettyPrint(self):
        # NOTE(review): tryPrint and the quoted block below are unused;
        # this currently just returns the length as a string.
        def tryPrint(x):
            return x.__class__.__name__
        '''
        try:
            return x.__class__.__name__ #x.prettyPrint()
        except AttributeError:
            return str(x)
        '''
        return str(len(self.l)) #''.join(map(tryPrint, self.l))
class JList2:
    """List whose elements remember the path environment in which they
    were appended; cheaper than a full facet tree (JList) and convertible
    back to one on demand."""
    def __init__(self, l=None):
        # Bug fix: the original used a mutable default argument (l=[]).
        # It was never mutated, so behaviour is unchanged, but None is
        # the safe sentinel.
        if l is None:
            l = []
        if isinstance(l, list):
            # Elements created outside any branch carry an empty env.
            self.l = [(i, {}) for i in l]
        else:
            raise NotImplementedError
    def append(self, val):
        # Record the element together with the current path environment.
        self.l.append((val, jeevesState.pathenv.getEnv()))
    def eval(self, env):
        # Elements whose stored environment is consistent with *env*.
        return [i for i, e in self.l if all(env[getLabel(v)] == e[v] for v in e)]
    def vars(self):
        # Every label mentioned by any element's stored environment.
        all_vars = set()
        for _, e in self.l:
            all_vars.update(set(e.keys()))
        return {getLabel(v) for v in all_vars}
    def convert_to_jlist1(self):
        # Expand the per-element environments into an explicit facet tree.
        all_vars = [v.name for v in self.vars()]
        def rec(cur_e, i):
            if i == len(all_vars):
                return FObject(
                    [i for i, e in self.l if all(cur_e[v] == e[v] for v in e)])
            else:
                cur_e1 = dict(cur_e)
                cur_e2 = dict(cur_e)
                cur_e1[all_vars[i]] = True
                cur_e2[all_vars[i]] = False
                return Facet(getLabel(all_vars[i]),
                    rec(cur_e1, i+1), rec(cur_e2, i+1))
        return JList(rec({}, 0))
    def __getitem__(self, i):
        return self.convert_to_jlist1().__getitem__(i)
    def __setitem__(self, i, val):
        raise NotImplementedError
    def __len__(self):
        return self.convert_to_jlist1().__len__()
class JIterator:
    # Thin wrapper marking *l* as a Jeeves iterator; currently it just
    # stores the underlying value.
    def __init__(self, l):
        self.l = l
@supports_jeeves
def jfun(f, *args, **kw):
    # Apply *f* to possibly-faceted arguments.  Jeeves-aware functions
    # (tagged by @supports_jeeves) are called directly; otherwise each
    # argument is concretized facet-by-facet via jfun2 (positionals) and
    # jfun3 (keywords).
    if hasattr(f, '__jeeves'):
        return f(*args, **kw)
    else:
        env = jeevesState.pathenv.getEnv()
        if len(args) > 0:
            return jfun2(
                f, args, kw, 0, fexpr_cast(args[0]).partialEval(env), [])
        else:
            # No positional args: walk the keyword args directly.
            it = kw.__iter__()
            try:
                fst = next(it)
            except StopIteration:
                # No arguments at all: just call f.
                return fexpr_cast(f())
            return jfun3(
                f, kw, it, fst, fexpr_cast(kw[fst]).partialEval(env), (), {})
def jfun2(f, args, kw, i, arg, args_concrete):
    # Concretize positional argument *i*; once all positionals are
    # concrete, hand off to jfun3 for the keyword arguments.
    if isinstance(arg, Constant) or isinstance(arg, FObject):
        env = jeevesState.pathenv.getEnv()
        if i < len(args) - 1:
            return jfun2(f, args, kw, i+1
                , fexpr_cast(args[i+1]).partialEval(env)
                , tuple(list(args_concrete) + [arg.v]))
        else:
            it = kw.__iter__()
            try:
                fst = next(it)
            except StopIteration:
                # No keyword arguments: call f on the concrete positionals.
                return fexpr_cast(f(*tuple(list(args_concrete) + [arg.v])))
            return jfun3(f, kw, it, fst, fexpr_cast(kw[fst]).partialEval(env)
                , tuple(list(args_concrete) + [arg.v]), {})
    else:
        # Faceted argument: recurse into both branches under the matching
        # path assumption and rebuild the facet.
        with PositiveVariable(arg.cond):
            thn = jfun2(f, args, kw, i, arg.thn, args_concrete)
        with NegativeVariable(arg.cond):
            els = jfun2(f, args, kw, i, arg.els, args_concrete)
        return Facet(arg.cond, thn, els)
from itertools import tee  # NOTE(review): mid-file import; used only by jfun3 below.
def jfun3(f, kw, it, key, val, args_concrete, kw_concrete):
    # Concretize keyword arguments one at a time; when the key iterator
    # is exhausted, call *f* with fully concrete args.
    if isinstance(val, Constant) or isinstance(val, FObject):
        kw_c = dict(kw_concrete)
        kw_c[key] = val.v
        try:
            next_key = next(it)
        except StopIteration:
            return fexpr_cast(f(*args_concrete, **kw_c))
        env = jeevesState.pathenv.getEnv()
        return jfun3(f, kw, it, next_key
            , fexpr_cast(kw[next_key]).partialEval(env), args_concrete, kw_c)
    else:
        # Faceted value: tee the iterator so both branches can consume
        # the remaining keys independently.
        it1, it2 = tee(it)
        with PositiveVariable(val.cond):
            thn = jfun3(f, kw, it1, key, val.thn, args_concrete, kw_concrete)
        with NegativeVariable(val.cond):
            els = jfun3(f, kw, it2, key, val.els, args_concrete, kw_concrete)
        return Facet(val.cond, thn, els)
def evalToConcrete(f):
    # Force *f* to a single concrete Python value under the current path
    # environment; raises if the value is still genuinely faceted.
    g = fexpr_cast(f).partialEval(jeevesState.pathenv.getEnv())
    if isinstance(g, Constant):
        return g.v
    elif isinstance(g, FObject):
        return g.v
    elif isinstance(g, Facet):
        # A facet whose branches agree is effectively concrete.
        if g.thn == g.els:
            return g.thn
        else:
            raise Exception("evalToConcrete on non-concrete")
    else:
        raise Exception("wow such error: evalToConcrete on non-concrete thingy-ma-bob")
from jlib.JContainer import *
| {
"repo_name": "jonathanmarvens/jeeves",
"path": "JeevesLib.py",
"copies": "2",
"size": "17374",
"license": "mit",
"hash": 9138850045440676000,
"line_mean": 35.1958333333,
"line_max": 176,
"alpha_frac": 0.5386784851,
"autogenerated": false,
"ratio": 3.7810663764961916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012904004921561265,
"num_lines": 480
} |
"""API for reading notebooks.
Authors:
* Jonathan Frederic
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
from . import v1
from . import v2
from . import v3
versions = {
1: v1,
2: v2,
3: v3,
}
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class NotJSONError(ValueError):
    """Raised when notebook input cannot be parsed as JSON."""
    pass
def parse_json(s, **kwargs):
    """Parse a JSON string into a dict."""
    try:
        return json.loads(s, **kwargs)
    except ValueError:
        # Truncate the message so huge inputs stay readable (~80 chars).
        preview = ("Notebook does not appear to be JSON: %r" % s)[:77]
        raise NotJSONError(preview + "...")
# High level API
def get_version(nb):
    """Get the version of a notebook.
    Parameters
    ----------
    nb : dict
        NotebookNode or dict containing notebook data.
    Returns
    -------
    Tuple containing major (int) and minor (int) version numbers
    """
    # Notebooks written before versioning default to v1.0.
    return (nb.get('nbformat', 1), nb.get('nbformat_minor', 0))
def reads(s, **kwargs):
    """Read a notebook from a json string and return the
    NotebookNode object.
    This function properly reads notebooks of any version. No version
    conversion is performed.
    Parameters
    ----------
    s : unicode
        The raw unicode string to read the notebook from.
    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    nb_dict = parse_json(s, **kwargs)
    (major, minor) = get_version(nb_dict)
    if major in versions:
        # Delegate to the reader module for this major version.
        return versions[major].to_notebook_json(nb_dict, minor=minor)
    else:
        # NOTE(review): NBFormatError is not defined or imported in the
        # visible part of this module -- confirm it is in scope, else this
        # line raises NameError for unsupported versions.
        raise NBFormatError('Unsupported nbformat version %s' % major)
def read(fp, **kwargs):
    """Read a notebook from a file and return the NotebookNode object.
    This function properly reads notebooks of any version. No version
    conversion is performed.
    Parameters
    ----------
    fp : file
        Any file-like object with a read method.
    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    raw = fp.read()
    return reads(raw, **kwargs)
| {
"repo_name": "EricCline/CEM_inc",
"path": "env/lib/python2.7/site-packages/IPython/nbformat/reader.py",
"copies": "4",
"size": "2709",
"license": "mit",
"hash": 6114249196295753000,
"line_mean": 24.3177570093,
"line_max": 88,
"alpha_frac": 0.5145810262,
"autogenerated": false,
"ratio": 4.591525423728814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7106106449928815,
"avg_score": null,
"num_lines": null
} |
"""API for reading notebooks of different versions"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
class NotJSONError(ValueError):
    """Raised when notebook input cannot be parsed as JSON."""
    pass
def parse_json(s, **kwargs):
    """Parse a JSON string into a dict."""
    try:
        nb_dict = json.loads(s, **kwargs)
    except ValueError:
        # Limit the error message to 80 characters. Display whatever JSON will fit.
        raise NotJSONError(("Notebook does not appear to be JSON: %r" % s)[:77] + "...")
    return nb_dict
# High level API
def get_version(nb):
    """Get the version of a notebook.
    Parameters
    ----------
    nb : dict
        NotebookNode or dict containing notebook data.
    Returns
    -------
    Tuple containing major (int) and minor (int) version numbers
    """
    # Pre-versioning notebooks are treated as v1.0.
    major = nb.get('nbformat', 1)
    minor = nb.get('nbformat_minor', 0)
    return (major, minor)
def reads(s, **kwargs):
    """Read a notebook from a json string and return the
    NotebookNode object.
    This function properly reads notebooks of any version. No version
    conversion is performed.
    Parameters
    ----------
    s : unicode
        The raw unicode string to read the notebook from.
    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    # Local import, presumably to avoid a circular import with the
    # package __init__ -- confirm.
    from . import versions, NBFormatError
    nb_dict = parse_json(s, **kwargs)
    (major, minor) = get_version(nb_dict)
    if major in versions:
        # Delegate to the reader module for this major version.
        return versions[major].to_notebook_json(nb_dict, minor=minor)
    else:
        raise NBFormatError('Unsupported nbformat version %s' % major)
def read(fp, **kwargs):
    """Read a notebook from a file and return the NotebookNode object.
    This function properly reads notebooks of any version. No version
    conversion is performed.
    Parameters
    ----------
    fp : file
        Any file-like object with a read method.
    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    # Delegate to reads() on the full file contents.
    return reads(fp.read(), **kwargs)
| {
"repo_name": "nitin-cherian/LifeLongLearning",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/nbformat/reader.py",
"copies": "17",
"size": "2060",
"license": "mit",
"hash": -7127258372401524000,
"line_mean": 24.1219512195,
"line_max": 88,
"alpha_frac": 0.6286407767,
"autogenerated": false,
"ratio": 4.153225806451613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""API for reading notebooks of different versions"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
class NotJSONError(ValueError):
    """Raised when notebook input cannot be parsed as JSON."""
    pass
def parse_json(s, **kwargs):
    """Parse a JSON string into a dict."""
    try:
        parsed = json.loads(s, **kwargs)
    except ValueError:
        # Keep the error readable: show at most ~80 chars of the input.
        snippet = ("Notebook does not appear to be JSON: %r" % s)[:77] + "..."
        raise NotJSONError(snippet)
    return parsed
# High level API
def get_version(nb):
    """Get the version of a notebook.
    Parameters
    ----------
    nb : dict
        NotebookNode or dict containing notebook data.
    Returns
    -------
    Tuple containing major (int) and minor (int) version numbers
    """
    # Unversioned (pre-nbformat) notebooks count as v1.0.
    version_pair = (nb.get('nbformat', 1), nb.get('nbformat_minor', 0))
    return version_pair
def reads(s, **kwargs):
    """Read a notebook from a json string and return the
    NotebookNode object.
    This function properly reads notebooks of any version. No version
    conversion is performed.
    Parameters
    ----------
    s : unicode
        The raw unicode string to read the notebook from.
    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    # Local import, presumably to avoid a circular import with the
    # package __init__ -- confirm.
    from . import versions, NBFormatError
    nb_dict = parse_json(s, **kwargs)
    (major, minor) = get_version(nb_dict)
    if major in versions:
        # Delegate to the reader module for this major version.
        return versions[major].to_notebook_json(nb_dict, minor=minor)
    else:
        raise NBFormatError('Unsupported nbformat version %s' % major)
def read(fp, **kwargs):
    """Read a notebook from a file and return the NotebookNode object.
    This function properly reads notebooks of any version. No version
    conversion is performed.
    Parameters
    ----------
    fp : file
        Any file-like object with a read method.
    Returns
    -------
    nb : NotebookNode
        The notebook that was read.
    """
    # Delegate to reads() on the full file contents.
    return reads(fp.read(), **kwargs)
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/nbformat/reader.py",
"copies": "1",
"size": "2082",
"license": "mit",
"hash": 4237860362427194000,
"line_mean": 22.9310344828,
"line_max": 79,
"alpha_frac": 0.6219980788,
"autogenerated": false,
"ratio": 4.164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005174615757255472,
"num_lines": 87
} |
# API for the TI eQEP hardware driver I wrote
# We need OS operations for this
import os
import select
class eQEP(object):
    """Wrapper for the TI eQEP (quadrature encoder) sysfs driver.

    All accessors go through the driver's sysfs attribute files under
    ``self.path`` ("mode", "period", "position").

    Fixes two defects in the original:
    - sysfs attribute files are now opened with ``with`` so they are
      closed even if a read/write raises;
    - ``os.open`` was called as ``os.open(path, os.O_RDONLY,
      os.O_NONBLOCK)``, which passes O_NONBLOCK as the *permission mode*
      argument rather than a flag, so the fd was never non-blocking.
      It is now OR'ed into the flags as intended.
    """
    # Counting modes.
    MODE_ABSOLUTE = 0
    MODE_RELATIVE = 1
    # eQEP Controller Locations (sysfs paths on the target board).
    eQEP0 = "/sys/devices/ocp.2/48300000.epwmss/48300180.eqep"
    eQEP1 = "/sys/devices/ocp.2/48302000.epwmss/48302180.eqep"
    eQEP2 = "/sys/devices/ocp.2/48304000.epwmss/48304180.eqep"

    def _write_attr(self, name, value):
        # Write one sysfs attribute; 'with' guarantees the file is closed.
        with open(self.path + "/" + name, "w") as attribute:
            attribute.write(str(value))

    def _read_attr(self, name):
        # Read one integer-valued sysfs attribute.
        with open(self.path + "/" + name, "r") as attribute:
            return int(attribute.readline())

    def set_mode(self, mode):
        """Set the counting mode (MODE_ABSOLUTE or MODE_RELATIVE)."""
        self._write_attr("mode", mode)

    def get_mode(self):
        """Return the current counting mode as an int."""
        return self._read_attr("mode")

    def set_period(self, period):
        """Set the unit timer period of the eQEP hardware."""
        self._write_attr("period", period)

    def get_period(self):
        """Return the unit timer period of the eQEP hardware."""
        return self._read_attr("period")

    def set_position(self, position):
        """Set the current position of the encoder hardware."""
        self._write_attr("position", position)

    def get_position(self):
        """Return the immediate position of the encoder hardware."""
        return self._read_attr("position")

    def poll_position(self):
        """Block until the driver reports new position data, then return it."""
        # Wait (indefinitely) for an exceptional condition on the fd.
        self.poller.poll(-1)
        # Rewind and re-read the whole attribute to get the fresh value.
        os.lseek(self.fd, 0, 0)
        return int(os.read(self.fd, 16))

    def __init__(self, path, mode):
        """Bind to the eQEP sysfs entry at *path* and set *mode*.

        :param path: base sysfs path (e.g. eQEP.eQEP1)
        :param mode: MODE_ABSOLUTE or MODE_RELATIVE
        """
        self.path = path
        self.set_mode(mode)
        # Reset the position counter.
        self.set_position(0)
        # Non-blocking fd on the position attribute, used for polling.
        self.fd = os.open(self.path + "/position", os.O_RDONLY | os.O_NONBLOCK)
        self.poller = select.poll()
        self.poller.register(self.fd, select.POLLPRI)

    def __del__(self):
        # Clean up the polling machinery; guard against a partially
        # constructed instance (e.g. __init__ failed before os.open).
        if hasattr(self, "poller") and hasattr(self, "fd"):
            self.poller.unregister(self.fd)
        if hasattr(self, "fd"):
            os.close(self.fd)
| {
"repo_name": "ValRose/Rose_Bone",
"path": "PythonLibraries/eqep.py",
"copies": "1",
"size": "3612",
"license": "mit",
"hash": -4257724933076015600,
"line_mean": 27.4409448819,
"line_max": 98,
"alpha_frac": 0.5631229236,
"autogenerated": false,
"ratio": 4.128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51911229236,
"avg_score": null,
"num_lines": null
} |
# API for the TI eQEP hardware driver I wrote
# We need OS operations for this
import os, select
class eQEP(object):
    """Wrapper for the TI eQEP (quadrature encoder) sysfs driver.

    Accessors read/write the driver's sysfs attribute files ("mode",
    "period", "position") under ``self.path``.

    Fixes: attribute files are opened via ``with`` so they always close,
    and the ``os.open`` call no longer passes ``os.O_NONBLOCK`` as the
    third (permission *mode*) argument -- it is OR'ed into the flags,
    which is what the original clearly intended.
    """
    # Counting modes.
    MODE_ABSOLUTE = 0
    MODE_RELATIVE = 1
    # eQEP Controller Locations (sysfs paths on the target board).
    eQEP0 = "/sys/devices/ocp.2/48300000.epwmss/48300180.eqep"
    eQEP1 = "/sys/devices/ocp.2/48302000.epwmss/48302180.eqep"
    eQEP2 = "/sys/devices/ocp.2/48304000.epwmss/48304180.eqep"

    def _write_attr(self, name, value):
        # Write one sysfs attribute, closing the file deterministically.
        with open(self.path + "/" + name, "w") as attribute:
            attribute.write(str(value))

    def _read_attr(self, name):
        # Read one integer-valued sysfs attribute.
        with open(self.path + "/" + name, "r") as attribute:
            return int(attribute.readline())

    def set_mode(self, mode):
        """Set the counting mode (MODE_ABSOLUTE or MODE_RELATIVE)."""
        self._write_attr("mode", mode)

    def get_mode(self):
        """Return the current counting mode as an int."""
        return self._read_attr("mode")

    def set_period(self, period):
        """Set the unit timer period of the eQEP hardware."""
        self._write_attr("period", period)

    def get_period(self):
        """Return the unit timer period of the eQEP hardware."""
        return self._read_attr("period")

    def set_position(self, position):
        """Set the current position of the encoder hardware."""
        self._write_attr("position", position)

    def get_position(self):
        """Return the immediate position of the encoder hardware."""
        return self._read_attr("position")

    def poll_position(self):
        """Block until new position data is available, then return it."""
        self.poller.poll(-1)
        # Rewind and re-read the attribute for the fresh value.
        os.lseek(self.fd, 0, 0)
        return int(os.read(self.fd, 16))

    def __init__(self, path, mode):
        """Bind to the eQEP sysfs entry at *path* and set *mode*."""
        self.path = path
        self.set_mode(mode)
        # Reset the position counter.
        self.set_position(0)
        # Non-blocking fd on the position attribute, used for polling.
        self.fd = os.open(self.path + "/position", os.O_RDONLY | os.O_NONBLOCK)
        self.poller = select.poll()
        self.poller.register(self.fd, select.POLLPRI)

    def __del__(self):
        # Clean up; guard against a partially constructed instance.
        if hasattr(self, "poller") and hasattr(self, "fd"):
            self.poller.unregister(self.fd)
        if hasattr(self, "fd"):
            os.close(self.fd)
| {
"repo_name": "mcdeoliveira/pyctrl",
"path": "pyctrl/bbb/eqep.py",
"copies": "3",
"size": "3588",
"license": "apache-2.0",
"hash": 7913465065470100000,
"line_mean": 27.935483871,
"line_max": 98,
"alpha_frac": 0.5652173913,
"autogenerated": false,
"ratio": 4.109965635738831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6175183027038832,
"avg_score": null,
"num_lines": null
} |
"""API for the Universal Device's ISY
This is a Work in progress
Supporting a Simple and OO interface for ISY home automation netapp
see also : http://www.universal-devices.com/residential/
http://wiki.universal-devices.com/index.php?title=Main_Page
NOTE: This library is not written by or supported by Universal Devices
-----
to use set the following env vars
ISY_ADDR the IP address of your ISY device
ISY_AUTH your login and password
eg:
export ISY_AUTH=admin:mypasswd
export ISY_ADDR=192.168.1.2
Files:
ISY/* - ISY Python lib
bin/isy_find.py - Upnp probe for devices on your network
bin/isy_nodespy - List registered devices
bin/isy_log.py - Get event or error logs
bin/isy_showevents.py - print live stream of events from ISY
bin/isy_var.py - Set, Get or display system vars
bin/isy_progs.py - List/Run registered programs
bin/isy_nestset.py - sync values from a Nest thermostat with an ISY
bin/isy_net_res.py - call registered net resorces on ISY
bin/isy_net_wol.py - send WOL to registered devices
The example code included is meant to demonstrate API use with minimal
code for clarity.
This package provides the following classes:
- Isy - primary class for interacting with a ISY network appliance
from this class most operations can be made though a simple call interface
- IsyNode - Node Object
Represent lights, switches, motion sensors
- IsyScene - Scene Object
Represents Scenes contains Nodes that comprise a "Scene"
- IsyNodeFolder - Can hold Scene's or Nodes
a organizational obj for Scene's and Nodes
- IsyVar - ISY device Variable
Represents variables that are available in the ISY device
- IsyProgram - ISY device Programs
Represents programs that are available in the ISY device
Additional support functions
- isy_discover - use Upnp to discover IP addr or ISY device
Internal classes
- IsyUtil - base class for most ISY classes
- IsySubClass - base class for sub Objects ( eg: Nodes, Scenes, Vars, Programs )
Exception Classes:
IsyError
IsyCommandError
IsyNodeError
IsyResponseError
IsyPropertyError
IsyValueError
IsyInvalidCmdError
IsyAttributeError
UpnpLimitExpired
"""
import sys
# Interpreter version guard.
# NOTE(review): this only writes a warning to stderr — it does not exit, so
# execution continues on older interpreters; confirm a hard exit was not
# intended.
if sys.hexversion < 0x2070100:
    sys.stderr.write("You need python 2.7.1 or later to run this script (ver={:0X})\n".format(sys.hexversion))
# Package metadata.
__revision__ = "$Id$"
__version__ = "0.1.20160710"
__author__ = 'Peter Shipley <peter.shipley@gmail.com>'
__copyright__ = "Copyright (C) 2016 Peter Shipley"
__license__ = "BSD"
#
# from ISY.IsyUtilClass import IsyUtil
#
# Re-export the public classes and helpers from the package submodules.
from ISY.IsyClass import Isy, IsyGetArg
from ISY.IsyDiscover import isy_discover
from ISY.IsyNodeClass import IsyNode, IsyScene, IsyNodeFolder
from ISY.IsyVarClass import IsyVar
from ISY.IsyProgramClass import IsyProgram
from ISY.IsyExceptionClass import IsyError
from ISY.IsyDebug import *
#
#__all__ = ['IsyUtil', 'Isy', 'IsyNode', 'IsyProgram', 'IsyVar']
# NOTE(review): 'IsyUtil', 'IsyUtilClass' and 'IsyClass' are not imported
# explicitly above — presumably they arrive via the IsyDebug wildcard
# import; verify, otherwise ``from ISY import *`` fails on missing names.
__all__ = ['Isy', 'IsyUtil', 'IsyUtilClass', 'IsyClass', 'IsyNode', 'IsyVar',
           'isy_discover', 'IsyGetArg']
#__all__ = ['IsyUtil', 'Isy']
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
    import __main__
    #print(__main__.__file___)
    print("ISY.__init__")
    print("syntax ok")
    exit(0)
| {
"repo_name": "evilpete/ISYlib-python",
"path": "ISY/__init__.py",
"copies": "1",
"size": "3385",
"license": "bsd-2-clause",
"hash": 4461183024855848000,
"line_mean": 25.8650793651,
"line_max": 110,
"alpha_frac": 0.6980797637,
"autogenerated": false,
"ratio": 3.334975369458128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4533055133158128,
"avg_score": null,
"num_lines": null
} |
"""API for the Universal Device's ISY
This is a Work in progress
Supporting a Simple and OO interface for ISY home automation netapp
see also : http://www.universal-devices.com/residential/
http://wiki.universal-devices.com/index.php?title=Main_Page
NOTE: This library is not written by or supported by Universal Devices
-----
to use set the following env vars
ISY_ADDR the IP address of your ISY device
ISY_AUTH your login and password
eg:
export ISY_AUTH=admin:mypasswd
export ISY_ADDR=192.168.1.2
Files:
ISY/* - ISY Python lib
bin/isy_find.py - Upnp probe for devices on your network
bin/isy_nodespy - List registered devices
bin/isy_log.py - Get event or error logs
bin/isy_showevents.py - print live stream of events from ISY
bin/isy_var.py - Set, Get or display system vars
bin/isy_progs.py - List/Run registered programs
bin/isy_nestset.py - sync values from a Nest thermostat with an ISY
bin/isy_net_res.py - call registered net resorces on ISY
bin/isy_net_wol.py - send WOL to registered devices
The example code included is meant to demonstrate API use with minimal
code for clarity.
This package provides the following classes :
- Isy - primary class for interacting with a ISY network appliance
from this class most operations can be made though a simple call interface
- IsyNode - Node Object
Represent lights, switches, motion sensors
- IsyScene - Scene Object
Represents Scenes contains Nodes that comprise a "Scene"
- IsyNodeFolder - Can hold Scene's or Nodes
a organizational obj for Scene's and Nodes
- IsyVar - ISY device Variable
Represents variables that are available in the ISY device
- IsyProgram - ISY device Programs
Represents programs that are available in the ISY device
Additional support functions
- isy_discover - use Upnp to discover IP addr or ISY device
Internal classes
- IsyUtil - base class for most ISY classes
- IsySubClass - base class for sub Objects ( eg: Nodes, Scenes, Vars, Programs )
Exception Classes :
IsyError
IsyCommandError
IsyNodeError
IsyResponseError
IsyPropertyError
IsyValueError
IsyInvalidCmdError
IsyAttributeError
UpnpLimitExpired
"""
import sys
# Interpreter version guard.
# NOTE(review): 0x20703f0 encodes 2.7.3 (final) although the message says
# "2.7"; also this only warns to stderr — it does not exit.
if sys.hexversion < 0x20703f0:
    sys.stderr.write("You need python 2.7 or later to run this script\n")
# Package metadata.
__revision__ = "$Id$"
__version__ = "0.1.20140704"
__author__ = 'Peter Shipley <peter.shipley@gmail.com>'
__copyright__ = "Copyright (C) 2014 Peter Shipley"
__license__ = "BSD"
#
# from ISY.IsyUtilClass import IsyUtil
#
# Re-export the public classes and helpers from the package submodules.
from ISY.IsyClass import Isy, IsyGetArg
from ISY.IsyDiscover import isy_discover
from ISY.IsyNodeClass import IsyNode, IsyScene, IsyNodeFolder
from ISY.IsyVarClass import IsyVar
from ISY.IsyProgramClass import IsyProgram
from ISY.IsyExceptionClass import IsyError
from ISY.IsyDebug import *
#
#__all__ = ['IsyUtil', 'Isy', 'IsyNode', 'IsyProgram', 'IsyVar']
# NOTE(review): 'IsyUtil', 'IsyUtilClass' and 'IsyClass' are not imported
# explicitly above — presumably they arrive via the IsyDebug wildcard
# import; verify, otherwise ``from ISY import *`` fails on missing names.
__all__ = ['Isy', 'IsyUtil', 'IsyUtilClass', 'IsyClass', 'IsyNode', 'IsyVar', 'isy_discover', 'IsyGetArg']
#__all__ = ['IsyUtil', 'Isy']
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
    #import __main__
    #print(__main__.__file___)
    print("ISY.__init__")
    print("syntax ok")
    exit(0)
| {
"repo_name": "fxstein/ISYlib-python",
"path": "ISY/__init__.py",
"copies": "1",
"size": "3305",
"license": "bsd-2-clause",
"hash": 7753496546243822000,
"line_mean": 26.7731092437,
"line_max": 107,
"alpha_frac": 0.7074130106,
"autogenerated": false,
"ratio": 3.282025819265144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489438829865144,
"avg_score": null,
"num_lines": null
} |
"""api for totals"""
from flask import Flask, jsonify, request
import finds
app = Flask(__name__)
mongohandle = finds.connect()
@app.route('/corporation/<int:corporationid>', methods=['GET'])
def get_by_corporation(corporationid):
    """Get totals of all losses for a corporation over a specified time period.

    Optional query parameters, checked in priority order:
      system - solar system filter
      days   - look-back window in days
      date   - a specific date
    With no parameters, totals for the last day are returned.
    """
    # Hoist the query parameters; the original re-read request.args for
    # every branch and compared with ``!= None`` instead of ``is not None``.
    system = request.args.get('system')
    days = request.args.get('days')
    date = request.args.get('date')
    if system is not None and days is not None:
        (shiptotals, itemtotals, ammototals) = finds.corporation_system_days(
            mongohandle, corporationid, system, days)
    elif system is not None and date is not None:
        (shiptotals, itemtotals, ammototals) = finds.corporation_system_date(
            mongohandle, corporationid, system, date)
    elif system is not None:
        # Bug fix (dead code): the original computed ``int(system)`` into a
        # local that was never used; the raw string is what gets passed.
        (shiptotals, itemtotals, ammototals) = finds.corporation_system_oneday(
            mongohandle, corporationid, system)
    elif days is not None:
        (shiptotals, itemtotals, ammototals) = finds.corporation_days(
            mongohandle, corporationid, days)
    elif date is not None:
        (shiptotals, itemtotals, ammototals) = finds.corporation_date(
            mongohandle, corporationid, date)
    else:
        (shiptotals, itemtotals, ammototals) = finds.corporation_oneday(
            mongohandle, corporationid)
    # The leading underscores in the keys are kept byte-identical — they
    # presumably force the (sorted) key order of the JSON response.
    return jsonify({'__shiptotals': shiptotals,
                    '_itemtotals': itemtotals,
                    'ammototals': ammototals})
@app.route('/alliance/<int:allianceid>', methods=['GET'])
def get_by_alliance(allianceid):
    """Get totals of all losses for an alliance over a specified time period.

    Optional query parameters, checked in priority order:
      system - solar system filter
      days   - look-back window in days
      date   - a specific date
    With no parameters, totals for the last day are returned.
    """
    # Hoist the query parameters; the original re-read request.args for
    # every branch and compared with ``!= None`` instead of ``is not None``.
    system = request.args.get('system')
    days = request.args.get('days')
    date = request.args.get('date')
    if system is not None and days is not None:
        (shiptotals, itemtotals, ammototals) = finds.alliance_system_days(
            mongohandle, allianceid, system, days)
    elif system is not None and date is not None:
        (shiptotals, itemtotals, ammototals) = finds.alliance_system_date(
            mongohandle, allianceid, system, date)
    elif system is not None:
        (shiptotals, itemtotals, ammototals) = finds.alliance_system_oneday(
            mongohandle, allianceid, system)
    elif days is not None:
        (shiptotals, itemtotals, ammototals) = finds.alliance_days(
            mongohandle, allianceid, days)
    elif date is not None:
        (shiptotals, itemtotals, ammototals) = finds.alliance_date(
            mongohandle, allianceid, date)
    else:
        (shiptotals, itemtotals, ammototals) = finds.alliance_oneday(
            mongohandle, allianceid)
    # Keys kept byte-identical (underscores presumably force JSON key order).
    return jsonify({'__shiptotals': shiptotals,
                    '_itemtotals': itemtotals,
                    'ammototals': ammototals})
@app.route('/doctrines', methods=['GET'])
def get_doctrines():
    """Get doctrines by date, by day window, or for the last 24 hours."""
    # ``is not None`` instead of the original ``!= None`` comparisons.
    date = request.args.get('date')
    days = request.args.get('days')
    if date is not None:
        doctrines = finds.doctrines_date(mongohandle, date)
    elif days is not None:
        doctrines = finds.doctrines_days(mongohandle, days)
    else:
        doctrines = finds.doctrines(mongohandle)
    return jsonify(doctrines)
if __name__ == '__main__':
    # Development entry point.
    # NOTE(review): debug=True enables the interactive debugger and reloader
    # — must not be used in production deployments.
    app.run(debug=True)
| {
"repo_name": "namrak/fpapi",
"path": "server.py",
"copies": "1",
"size": "5404",
"license": "mit",
"hash": -6847418402877151000,
"line_mean": 62.5764705882,
"line_max": 106,
"alpha_frac": 0.4420799408,
"autogenerated": false,
"ratio": 4.908265213442325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5850345154242326,
"avg_score": null,
"num_lines": null
} |
"""API for traversing the AST nodes. Implemented by the compiler and
meta introspection.
"""
from .nodes import Node
class NodeVisitor:
    """Walk the abstract syntax tree and call a visitor function for every
    node found.  Whatever the visitor functions return is forwarded to the
    caller of `visit`.

    By default the visitor function for a node is ``'visit_'`` plus the
    node's class name, so a `TryFinally` node is dispatched to
    `visit_TryFinally`.  Override `get_visitor` to change that mapping.
    When no visitor function exists for a node (`get_visitor` returns
    `None`), `generic_visit` is used instead.
    """

    def get_visitor(self, node):
        """Return the visitor function for this node, or `None` when the
        node has no dedicated visitor (the generic visitor is used then).
        """
        method_name = "visit_" + node.__class__.__name__
        return getattr(self, method_name, None)

    def visit(self, node, *args, **kwargs):
        """Visit a node."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node, *args, **kwargs)
        return visitor(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walk the abstract syntax tree and allow modification of nodes.

    The `NodeTransformer` walks the AST and uses the return value of the
    visitor functions to replace or remove the old node.  A return value of
    `None` removes the node from its previous location; any other value
    replaces it.  Returning the original node leaves it in place.
    """

    def generic_visit(self, node, *args, **kwargs):
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                replacement = []
                for child in old_value:
                    if isinstance(child, Node):
                        child = self.visit(child, *args, **kwargs)
                        if child is None:
                            # Dropped by the visitor.
                            continue
                        if not isinstance(child, Node):
                            # Visitor returned several nodes; splice them in.
                            replacement.extend(child)
                            continue
                    replacement.append(child)
                # Mutate the list in place so the node keeps its identity.
                old_value[:] = replacement
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        result = self.visit(node, *args, **kwargs)
        if isinstance(result, list):
            return result
        return [result]
| {
"repo_name": "pallets/jinja2",
"path": "src/jinja2/visitor.py",
"copies": "3",
"size": "3184",
"license": "bsd-3-clause",
"hash": -8247278693582226000,
"line_mean": 39.3037974684,
"line_max": 76,
"alpha_frac": 0.5926507538,
"autogenerated": false,
"ratio": 4.465638148667602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6558288902467603,
"avg_score": null,
"num_lines": null
} |
"""API for traversing the AST nodes. Implemented by the compiler and
meta introspection.
"""
import typing as t
from .nodes import Node
# Static-typing-only declarations; this block never executes at runtime.
if t.TYPE_CHECKING:
    import typing_extensions as te

    class VisitCallable(te.Protocol):
        # Structural type of a ``visit_*`` method: takes the node plus
        # arbitrary forwarded positional/keyword arguments, returns anything.
        def __call__(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
            ...
class NodeVisitor:
    """Walk the abstract syntax tree and call a visitor function for every
    node found.  Whatever the visitor functions return is forwarded to the
    caller of `visit`.

    By default the visitor function for a node is ``'visit_'`` plus the
    node's class name, so a `TryFinally` node is dispatched to
    `visit_TryFinally`.  Override `get_visitor` to change that mapping.
    When no visitor function exists for a node (`get_visitor` returns
    `None`), `generic_visit` is used instead.
    """

    def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
        """Return the visitor function for this node, or `None` when the
        node has no dedicated visitor (the generic visitor is used then).
        """
        method_name = f"visit_{type(node).__name__}"
        return getattr(self, method_name, None)  # type: ignore

    def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Visit a node."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node, *args, **kwargs)
        return visitor(node, *args, **kwargs)

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walk the abstract syntax tree and allow modification of nodes.

    The `NodeTransformer` walks the AST and uses the return value of the
    visitor functions to replace or remove the old node.  A return value of
    `None` removes the node from its previous location; any other value
    replaces it.  Returning the original node leaves it in place.
    """

    def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                replacement = []
                for child in old_value:
                    if isinstance(child, Node):
                        child = self.visit(child, *args, **kwargs)
                        if child is None:
                            # Dropped by the visitor.
                            continue
                        if not isinstance(child, Node):
                            # Visitor returned several nodes; splice them in.
                            replacement.extend(child)
                            continue
                    replacement.append(child)
                # Mutate the list in place so the node keeps its identity.
                old_value[:] = replacement
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        result = self.visit(node, *args, **kwargs)
        if isinstance(result, list):
            return result
        return [result]
| {
"repo_name": "pallets/jinja",
"path": "src/jinja2/visitor.py",
"copies": "1",
"size": "3572",
"license": "bsd-3-clause",
"hash": 4126757494746424300,
"line_mean": 37.8260869565,
"line_max": 84,
"alpha_frac": 0.5935050392,
"autogenerated": false,
"ratio": 4.18757327080891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.528107831000891,
"avg_score": null,
"num_lines": null
} |
"""API for USB radio receiver for initializing device and data I/O"""
import usb.core
import usb.util
# USB vendor/product IDs used to locate the device.
MICROCHIP_VID = 0x04D8
EXAMPLE_PID = 0x003F
# USB interface number claimed on the device.
INTERFACE = 0
# Read / Write / Reply — direction byte (second byte of every message frame).
READ = 0x00
WRITE = 0x01
REPLY = 0x02
# Message Types — first byte of every message frame.
ERROR = 0x00
CONFIG = 0x01
DATA = 0x02
RESETDEVICE = 0x03
RTC = 0x04
# Human-readable names indexed by message type.
MESSAGETYPES = ['NONE', 'CONFIG', 'DATA', 'RESET DEVICE', 'RTC']
class ReceiverError(Exception):
    """Raised when the receiver causes an unrecoverable error."""
class MicrochipReceiver(object):
    """Simple API for the Microchip example USB device."""

    def __init__(self):
        """Initialize bookkeeping only; no USB traffic happens here."""
        # Handle to the USB device; None whenever the device is released.
        self.device = None
        # True while the kernel driver is detached from the interface.
        self.kernel_driver_detached = False

    def init(self, vendor_id=MICROCHIP_VID, product_id=EXAMPLE_PID):
        """Initialize USB receiver device.

        :vendor_id: USB vendor ID (optional)
        :product_id: USB product ID (optional)
        :returns: None
        """
        if self.device is not None:
            # Already initialized.
            return
        self.device = usb.core.find(
            idVendor=vendor_id,
            idProduct=product_id)
        if self.device.is_kernel_driver_active(INTERFACE):
            self.device.detach_kernel_driver(0)
            self.kernel_driver_detached = True
        self.device.set_configuration()
        self.device.reset()

    def release(self):
        """Release the device and reattach the kernel driver.

        :returns: None
        """
        if self.device is None:
            return
        usb.util.release_interface(self.device, INTERFACE)
        if self.kernel_driver_detached:
            self.device.attach_kernel_driver(INTERFACE)
            # Bug fix: the original assigned ``self.kernel_detached`` (a
            # typo), so the detach flag was never actually cleared.
            self.kernel_driver_detached = False
        self.device = None

    def write(self, message_type, message):
        """Write the given message type and data to the device.

        :message_type: one of the message-type constants (CONFIG, DATA, ...)
        :message: list of payload bytes
        :returns: number of bytes written
        :raises ReceiverError: when fewer bytes than expected were written
        """
        self.init()
        # Frame layout: [type, direction, payload length, payload...].
        data = [message_type, WRITE, len(message)] + message
        bytes_written = self.device.write(1, data, 0)
        if bytes_written != len(data):
            raise ReceiverError(
                "Write failed! Sent %s bytes, but %s bytes "
                "was written" % (len(data), bytes_written))
        self.release()
        return bytes_written

    def read(self, message_type, message=''):
        """Read data from the device.

        :message_type: one of the message-type constants
        :message: optional request payload (list of bytes)
        :returns: list of bytes read from the device
        """
        self.init()
        data = [message_type, READ]
        if message != '':
            data += [len(message)] + message
        # OUT endpoint for the request.
        pipe_number = 0x01
        timeout = 0
        self.device.write(pipe_number, data, timeout)
        # IN endpoint (0x80 bit set) for the reply.
        pipe_number = 0x81
        length = 64
        timeout = 100
        data = self.device.read(pipe_number, length, timeout)
        self.release()
        return data

    def read_rtc(self):
        """Helper method for reading the device real time clock (RTC).

        (Doc fix: the original docstring said "writing".)

        :returns: the raw RTC reply, or [] when the reply is malformed
        """
        data = self.read(RTC)
        # TODO: why some other components should handle weird responses?
        # better to return datetime instances
        if len(data) < 3:
            return []
        if data[0] != RTC:
            return []
        if data[1] != REPLY:
            return []
        if data[2] != 7:
            return []
        return data

    def write_rtc(self, date):
        """Set device real time clock to given `date`.

        :date: New date as datetime instance
        :returns: None
        """
        self.write(RTC, date)

    def write_data(self, data):
        """Helper for writing data to device.

        :data: List of bytes to write
        :returns: None
        """
        return self.write(DATA, data)

    def read_data(self):
        """Read data from device.

        :returns: List of bytes that was read from device
        """
        return self.read(DATA)

    def read_config(self, address, length):
        """Read configuration from device.

        Bug fix: ``self`` was missing from the parameter list although the
        body calls ``self.read`` — any instance call passed the instance as
        ``address`` and raised at runtime.

        :address: Configuration register address
        :length: Length of register
        :returns: Configuration register
        """
        data = [0] * length
        data[0] = address
        return self.read(CONFIG, data)
| {
"repo_name": "surfmikko/weatherstation",
"path": "weatherstation/receiver.py",
"copies": "1",
"size": "4206",
"license": "mit",
"hash": -7179288121762727000,
"line_mean": 20.90625,
"line_max": 72,
"alpha_frac": 0.5706134094,
"autogenerated": false,
"ratio": 4.1114369501466275,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5182050359546627,
"avg_score": null,
"num_lines": null
} |
"""API for user module."""
import requests
from flask import Blueprint, render_template, request, flash, url_for, redirect
from flask import current_app
from flask_security.utils import login_user
from tagio.models.user import User
from tagio.utils import string_generator
# Blueprint grouping the Slack OAuth endpoints, mounted under /slack.
blueprint = Blueprint('slack',
                      __name__,
                      url_prefix='/slack')
@blueprint.route('/auth')
def auth():
    """Authenticate a user via Slack OAuth and log them in.

    Exchanges the ``code`` query parameter for an access token, verifies
    the Slack team, provisions a local user on first login, and starts a
    session.  Every failure path flashes an error and redirects back to
    the login page.
    """
    code = request.args.get('code', None)
    if code is None:
        # Bug fix: the original raised a placeholder ``Exception('test')``
        # here; fail the same way every other error branch in this view does.
        flash('Missing authorization code from slack', 'error')
        return redirect(url_for('security.login'))
    # NOTE: do not log or print ``code`` — it is a short-lived OAuth secret
    # (the original debug ``print`` leaked it to stdout).
    payload = {
        'code': code,
        'client_id': current_app.config['SLACK_APP_ID'],
        'client_secret': current_app.config['SLACK_APP_SECRET_CODE']
    }
    r = requests.post('https://slack.com/api/oauth.access', params=payload)
    if r.status_code != 200:
        flash('Can not request to slack %s' % (r.status_code), 'error')
        return redirect(url_for('security.login'))
    rj = r.json()
    if not rj.get('ok', False):
        flash('Can not request to slack %s' % (rj.get('error', 'unknown')), 'error')
        return redirect(url_for('security.login'))
    team = rj.get('team', None)
    if team is None:
        flash('Invalid response from slack!', 'error')
        return redirect(url_for('security.login'))
    team_id = team.get('id')
    if team_id != current_app.config['SLACK_TEAM_ID']:
        flash('Invalid slack team!', 'error')
        return redirect(url_for('security.login'))
    slack_user = rj.get('user', None)
    if slack_user is None:
        flash('Invalid response from slack!', 'error')
        return redirect(url_for('security.login'))
    # Robustness fix: the original indexed ['email'] and only then checked
    # for None, so a missing key raised KeyError before the check ran.
    slack_email = slack_user.get('email')
    if slack_email is None:
        flash('Invalid response from slack!', 'error')
        return redirect(url_for('security.login'))
    slack_email = slack_email.lower()
    slack_name = slack_user['name']
    access_token = rj['access_token']
    user = User.query.filter(User.email == slack_email).first()
    if user is None:
        # First login: provision a local account with a random password.
        username = slack_email.split('@')[0]
        password = string_generator(16)
        user = User(username=username, email=slack_email, password=password)
        user.save()
    # Keep the Slack profile data up to date on every login.
    user['slack_name'] = slack_name
    user['slack_access_token'] = access_token
    user.save()
    if not user.active:
        flash('User is not activated!', 'error')
        return redirect(url_for('security.login'))
    # Non-permanent session (remember=False).
    login_user(user, False)
    return render_template('public/home.html')
| {
"repo_name": "makerhanoi/tagio",
"path": "tagio/views/slack.py",
"copies": "1",
"size": "2535",
"license": "bsd-3-clause",
"hash": 3029470987435965000,
"line_mean": 29.1785714286,
"line_max": 84,
"alpha_frac": 0.617357002,
"autogenerated": false,
"ratio": 3.7007299270072993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9817386648895254,
"avg_score": 0.00014005602240896358,
"num_lines": 84
} |
"""API for working with a Nvim Buffer."""
from .common import Remote
from ..compat import IS_PYTHON3, check_async
# Bug fix: ``__all__`` must be a tuple of names.  The original
# ``('Buffer')`` is just a parenthesized string, which makes
# ``from ... import *`` iterate its characters and fail.
__all__ = ('Buffer',)

if IS_PYTHON3:
    # Python 3 has no ``basestring``; alias it so the 2/3-compatible
    # isinstance checks below keep working.
    basestring = str
def adjust_index(idx, default=None):
    """Convert from python indexing convention to nvim indexing convention."""
    if idx is None:
        return default
    # Negative (from-the-end) indexes shift by one; others pass through.
    return idx - 1 if idx < 0 else idx
class Buffer(Remote):
    """A remote Nvim buffer."""

    _api_prefix = "nvim_buf_"

    def __len__(self):
        """Return the number of lines contained in a Buffer."""
        return self.request('nvim_buf_line_count')

    def __getitem__(self, idx):
        """Get a buffer line or slice by integer index.

        Indexes may be negative to specify positions from the end of the
        buffer. For example, -1 is the last line, -2 is the line before that
        and so on.

        When retrieving slices, omitting indexes (eg: `buffer[:]`) will bring
        the whole buffer.
        """
        if not isinstance(idx, slice):
            # Single line: fetch the one-line range [i, i+1) with strict
            # bounds checking (third argument True) and unwrap it.
            i = adjust_index(idx)
            return self.request('nvim_buf_get_lines', i, i + 1, True)[0]
        start = adjust_index(idx.start, 0)
        end = adjust_index(idx.stop, -1)
        return self.request('nvim_buf_get_lines', start, end, False)

    def __setitem__(self, idx, item):
        """Replace a buffer line or slice by integer index.

        Like with `__getitem__`, indexes may be negative.

        When replacing slices, omitting indexes (eg: `buffer[:]`) will replace
        the whole buffer.
        """
        if not isinstance(idx, slice):
            i = adjust_index(idx)
            # item=None deletes the line (empty replacement list).
            lines = [item] if item is not None else []
            return self.request('nvim_buf_set_lines', i, i + 1, True, lines)
        lines = item if item is not None else []
        start = adjust_index(idx.start, 0)
        end = adjust_index(idx.stop, -1)
        return self.request('nvim_buf_set_lines', start, end, False, lines)

    def __iter__(self):
        """Iterate lines of a buffer.

        This will retrieve all lines locally before iteration starts. This
        approach is used because for most cases, the gain is much greater by
        minimizing the number of API calls by transferring all data needed to
        work.
        """
        lines = self[:]
        for line in lines:
            yield line

    def __delitem__(self, idx):
        """Delete line or slice of lines from the buffer.

        This is the same as __setitem__(idx, [])
        """
        self.__setitem__(idx, None)

    def append(self, lines, index=-1):
        """Append a string or list of lines to the buffer."""
        if isinstance(lines, (basestring, bytes)):
            lines = [lines]
        return self.request('nvim_buf_set_lines', index, index, True, lines)

    def mark(self, name):
        """Return (row, col) tuple for a named mark."""
        return self.request('nvim_buf_get_mark', name)

    def range(self, start, end):
        """Return a `Range` object, which represents part of the Buffer."""
        return Range(self, start, end)

    def add_highlight(self, hl_group, line, col_start=0,
                      col_end=-1, src_id=-1, async_=None,
                      **kwargs):
        """Add a highlight to the buffer.

        Defaults to an asynchronous call when src_id != 0; the legacy
        ``async`` keyword is accepted via **kwargs (see check_async).
        """
        async_ = check_async(async_, kwargs, src_id != 0)
        return self.request('nvim_buf_add_highlight', src_id, hl_group,
                            line, col_start, col_end, async_=async_)

    def clear_highlight(self, src_id, line_start=0, line_end=-1, async_=None,
                        **kwargs):
        """Clear highlights from the buffer."""
        async_ = check_async(async_, kwargs, True)
        self.request('nvim_buf_clear_highlight', src_id,
                     line_start, line_end, async_=async_)

    def update_highlights(self, src_id, hls, clear_start=0, clear_end=-1,
                          clear=False, async_=True):
        """Add or update highlights in batch to avoid unnecessary redraws.

        A `src_id` must have been allocated prior to use of this function. Use
        for instance `nvim.new_highlight_source()` to get a src_id for your
        plugin.

        `hls` should be a list of highlight items. Each item should be a list
        or tuple on the form `("GroupName", linenr, col_start, col_end)` or
        `("GroupName", linenr)` to highlight an entire line.

        By default existing highlights are preserved. Specify a line range with
        clear_start and clear_end to replace highlights in this range. As a
        shorthand, use clear=True to clear the entire buffer before adding the
        new highlights.
        """
        if clear and clear_start is None:
            clear_start = 0
        # Delegates to the private lua helper so the whole batch is applied
        # in one call.
        lua = self._session._get_lua_private()
        lua.update_highlights(self, src_id, hls, clear_start, clear_end,
                              async_=async_)

    @property
    def name(self):
        """Get the buffer name."""
        return self.request('nvim_buf_get_name')

    @name.setter
    def name(self, value):
        """Set the buffer name. BufFilePre/BufFilePost are triggered."""
        return self.request('nvim_buf_set_name', value)

    @property
    def valid(self):
        """Return True if the buffer still exists."""
        return self.request('nvim_buf_is_valid')

    @property
    def number(self):
        """Get the buffer number."""
        return self.handle
class Range(object):
    """A view onto a contiguous span of Buffer lines (1-based, inclusive)."""

    def __init__(self, buffer, start, end):
        self._buffer = buffer
        # Store the bounds internally as 0-based inclusive indexes.
        self.start = start - 1
        self.end = end - 1

    def __len__(self):
        return self.end - self.start + 1

    def __getitem__(self, idx):
        if not isinstance(idx, slice):
            return self._buffer[self._normalize_index(idx)]
        lo = self._normalize_index(idx.start)
        hi = self._normalize_index(idx.stop)
        lo = self.start if lo is None else lo
        hi = self.end + 1 if hi is None else hi
        return self._buffer[lo:hi]

    def __setitem__(self, idx, lines):
        if not isinstance(idx, slice):
            self._buffer[self._normalize_index(idx)] = lines
            return
        lo = self._normalize_index(idx.start)
        hi = self._normalize_index(idx.stop)
        lo = self.start if lo is None else lo
        hi = self.end if hi is None else hi
        # The target buffer slice is inclusive of ``hi``.
        self._buffer[lo:hi + 1] = lines

    def __iter__(self):
        for lineno in range(self.start, self.end + 1):
            yield self._buffer[lineno]

    def append(self, lines, i=None):
        where = self._normalize_index(i)
        if where is None:
            where = self.end + 1
        self._buffer.append(lines, where)

    def _normalize_index(self, index):
        """Map a range-relative index onto the buffer, clamped to the range."""
        if index is None:
            return None
        if index < 0:
            return self.end
        return min(index + self.start, self.end)
| {
"repo_name": "zchee/python-client",
"path": "pynvim/api/buffer.py",
"copies": "2",
"size": "7024",
"license": "apache-2.0",
"hash": 3680403572262321000,
"line_mean": 32.4476190476,
"line_max": 79,
"alpha_frac": 0.5727505695,
"autogenerated": false,
"ratio": 3.9931779420125073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 210
} |
"""API for working with a Nvim Buffer."""
from .common import Remote
from ..compat import IS_PYTHON3
# Bug fix: ``__all__`` must be a tuple of names.  The original
# ``('Buffer')`` is just a parenthesized string, which makes
# ``from ... import *`` iterate its characters and fail.
__all__ = ('Buffer',)

if IS_PYTHON3:
    # Python 3 has no ``basestring``; alias it so the 2/3-compatible
    # isinstance checks below keep working.
    basestring = str
def adjust_index(idx, default=None):
    """Convert from python indexing convention to nvim indexing convention."""
    if idx is None:
        result = default
    elif idx < 0:
        # Negative (from-the-end) indexes shift by one.
        result = idx - 1
    else:
        result = idx
    return result
class Buffer(Remote):
"""A remote Nvim buffer."""
_api_prefix = "nvim_buf_"
def __len__(self):
"""Return the number of lines contained in a Buffer."""
return self.request('buffer_line_count')
def __getitem__(self, idx):
"""Get a buffer line or slice by integer index.
Indexes may be negative to specify positions from the end of the
buffer. For example, -1 is the last line, -2 is the line before that
and so on.
When retrieving slices, omiting indexes(eg: `buffer[:]`) will bring
the whole buffer.
"""
if not isinstance(idx, slice):
i = adjust_index(idx)
return self.request('nvim_buf_get_lines', i, i + 1, True)[0]
start = adjust_index(idx.start, 0)
end = adjust_index(idx.stop, -1)
return self.request('nvim_buf_get_lines', start, end, False)
def __setitem__(self, idx, item):
"""Replace a buffer line or slice by integer index.
Like with `__getitem__`, indexes may be negative.
When replacing slices, omiting indexes(eg: `buffer[:]`) will replace
the whole buffer.
"""
if not isinstance(idx, slice):
i = adjust_index(idx)
lines = [item] if item is not None else []
return self.request('nvim_buf_set_lines', i, i + 1, True, lines)
lines = item if item is not None else []
start = adjust_index(idx.start, 0)
end = adjust_index(idx.stop, -1)
return self.request('buffer_set_lines', start, end, False, lines)
def __iter__(self):
"""Iterate lines of a buffer.
This will retrieve all lines locally before iteration starts. This
approach is used because for most cases, the gain is much greater by
minimizing the number of API calls by transfering all data needed to
work.
"""
lines = self[:]
for line in lines:
yield line
def __delitem__(self, idx):
"""Delete line or slice of lines from the buffer.
This is the same as __setitem__(idx, [])
"""
self.__setitem__(idx, None)
def append(self, lines, index=-1):
"""Append a string or list of lines to the buffer."""
if isinstance(lines, (basestring, bytes)):
lines = [lines]
return self.request('nvim_buf_set_lines', index, index, True, lines)
def mark(self, name):
"""Return (row, col) tuple for a named mark."""
return self.request('nvim_buf_get_mark', name)
def range(self, start, end):
"""Return a `Range` object, which represents part of the Buffer."""
return Range(self, start, end)
def add_highlight(self, hl_group, line, col_start=0,
col_end=-1, src_id=-1, async=None):
"""Add a highlight to the buffer."""
if async is None:
async = (src_id != 0)
return self.request('nvim_buf_add_highlight', src_id, hl_group,
line, col_start, col_end, async=async)
def clear_highlight(self, src_id, line_start=0, line_end=-1, async=True):
"""Clear highlights from the buffer."""
self.request('nvim_buf_clear_highlight', src_id,
line_start, line_end, async=async)
    @property
    def name(self):
        """Get the buffer name."""
        return self.request('nvim_buf_get_name')
    @name.setter
    def name(self, value):
        """Set the buffer name. BufFilePre/BufFilePost are triggered."""
        return self.request('nvim_buf_set_name', value)
    @property
    def valid(self):
        """Return True if the buffer still exists."""
        return self.request('nvim_buf_is_valid')
    @property
    def number(self):
        """Get the buffer number."""
        # The msgpack-RPC handle doubles as the buffer number here;
        # `handle` is provided by the Remote base class (not visible in
        # this file chunk).
        return self.handle
class Range(object):
    """A view over a contiguous span of Buffer lines.

    ``start``/``end`` are accepted as 1-based line numbers and stored as
    0-based indices; the stored range is inclusive of both endpoints.
    """

    def __init__(self, buffer, start, end):
        self._buffer = buffer
        self.start = start - 1
        self.end = end - 1

    def __len__(self):
        # Both endpoints are inclusive, hence the +1.
        return self.end - self.start + 1

    def __getitem__(self, idx):
        """Get a line (or list of lines, for a slice) from the range."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end + 1 if hi is None else hi
            return self._buffer[lo:hi]
        return self._buffer[self._normalize_index(idx)]

    def __setitem__(self, idx, lines):
        """Replace a line (or slice of lines) within the range."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end + 1 if hi is None else hi
            self._buffer[lo:hi] = lines
        else:
            self._buffer[self._normalize_index(idx)] = lines

    def __iter__(self):
        """Yield each line covered by the range."""
        for line_num in range(self.start, self.end + 1):
            yield self._buffer[line_num]

    def append(self, lines, i=None):
        """Append lines to the underlying buffer at position ``i``."""
        pos = self._normalize_index(i)
        self._buffer.append(lines, self.end + 1 if pos is None else pos)

    def _normalize_index(self, index):
        """Map a range-relative index to a buffer index, clamped to the range."""
        if index is None:
            return None
        # Any negative index maps to the last line of the range; positive
        # indexes are offset by the range start and clamped to its end.
        pos = self.end if index < 0 else index + self.start
        return min(pos, self.end)
| {
"repo_name": "brcolow/python-client",
"path": "neovim/api/buffer.py",
"copies": "1",
"size": "5788",
"license": "apache-2.0",
"hash": 5721764985224044000,
"line_mean": 30.2864864865,
"line_max": 78,
"alpha_frac": 0.5668624741,
"autogenerated": false,
"ratio": 3.9643835616438357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031246035743835,
"avg_score": null,
"num_lines": null
} |
"""API for working with a Nvim Buffer."""
from pynvim.api.common import Remote
from pynvim.compat import IS_PYTHON3, check_async
# Public API of this module. The trailing comma is required: without it
# ('Buffer') is just a parenthesized string, and `from ... import *`
# would iterate its characters and fail.
__all__ = ('Buffer',)
if IS_PYTHON3:
    basestring = str
def adjust_index(idx, default=None):
    """Convert from python indexing convention to nvim indexing convention.

    ``None`` maps to ``default``; negative indexes are shifted down by one;
    non-negative indexes are returned unchanged.
    """
    if idx is None:
        return default
    return idx - 1 if idx < 0 else idx
class Buffer(Remote):
    """A remote Nvim buffer.

    Lines are accessed with Python indexing/slicing syntax; every
    operation is forwarded to Nvim over msgpack-RPC via ``self.request``
    (provided by the ``Remote`` base class).
    """
    # Prefix used by the Remote base when resolving short API method names.
    _api_prefix = "nvim_buf_"
    def __len__(self):
        """Return the number of lines contained in a Buffer."""
        return self.request('nvim_buf_line_count')
    def __getitem__(self, idx):
        """Get a buffer line or slice by integer index.
        Indexes may be negative to specify positions from the end of the
        buffer. For example, -1 is the last line, -2 is the line before that
        and so on.
        When retrieving slices, omitting indexes (eg: `buffer[:]`) will bring
        the whole buffer.
        """
        if not isinstance(idx, slice):
            i = adjust_index(idx)
            # nvim_buf_get_lines always returns a list; unwrap the one line.
            return self.request('nvim_buf_get_lines', i, i + 1, True)[0]
        start = adjust_index(idx.start, 0)
        end = adjust_index(idx.stop, -1)
        # Final boolean flag differs between the single-line and slice
        # paths -- presumably strict_indexing; TODO confirm against the
        # Nvim API reference.
        return self.request('nvim_buf_get_lines', start, end, False)
    def __setitem__(self, idx, item):
        """Replace a buffer line or slice by integer index.
        Like with `__getitem__`, indexes may be negative.
        When replacing slices, omitting indexes (eg: `buffer[:]`) will replace
        the whole buffer.
        """
        if not isinstance(idx, slice):
            i = adjust_index(idx)
            # ``None`` deletes the line (empty replacement list).
            lines = [item] if item is not None else []
            return self.request('nvim_buf_set_lines', i, i + 1, True, lines)
        lines = item if item is not None else []
        start = adjust_index(idx.start, 0)
        end = adjust_index(idx.stop, -1)
        return self.request('nvim_buf_set_lines', start, end, False, lines)
    def __iter__(self):
        """Iterate lines of a buffer.
        This will retrieve all lines locally before iteration starts. This
        approach is used because for most cases, the gain is much greater by
        minimizing the number of API calls by transfering all data needed to
        work.
        """
        lines = self[:]
        for line in lines:
            yield line
    def __delitem__(self, idx):
        """Delete line or slice of lines from the buffer.
        This is the same as __setitem__(idx, [])
        """
        self.__setitem__(idx, None)
    def __ne__(self, other):
        """Test inequality of Buffers.
        Necessary for Python 2 compatibility.
        """
        # NOTE(review): relies on __eq__ inherited from Remote (not visible
        # in this file); confirm it is defined there.
        return not self.__eq__(other)
    def append(self, lines, index=-1):
        """Append a string or list of lines to the buffer."""
        if isinstance(lines, (basestring, bytes)):
            lines = [lines]
        return self.request('nvim_buf_set_lines', index, index, True, lines)
    def mark(self, name):
        """Return (row, col) tuple for a named mark."""
        return self.request('nvim_buf_get_mark', name)
    def range(self, start, end):
        """Return a `Range` object, which represents part of the Buffer."""
        return Range(self, start, end)
    def add_highlight(self, hl_group, line, col_start=0,
                      col_end=-1, src_id=-1, async_=None,
                      **kwargs):
        """Add a highlight to the buffer."""
        # check_async resolves the effective async flag, also accepting the
        # legacy 'async' spelling through **kwargs.
        async_ = check_async(async_, kwargs, src_id != 0)
        return self.request('nvim_buf_add_highlight', src_id, hl_group,
                            line, col_start, col_end, async_=async_)
    def clear_highlight(self, src_id, line_start=0, line_end=-1, async_=None,
                        **kwargs):
        """Clear highlights from the buffer."""
        async_ = check_async(async_, kwargs, True)
        self.request('nvim_buf_clear_highlight', src_id,
                     line_start, line_end, async_=async_)
    def update_highlights(self, src_id, hls, clear_start=0, clear_end=-1,
                          clear=False, async_=True):
        """Add or update highlights in batch to avoid unnecessary redraws.
        A `src_id` must have been allocated prior to use of this function. Use
        for instance `nvim.new_highlight_source()` to get a src_id for your
        plugin.
        `hls` should be a list of highlight items. Each item should be a list
        or tuple on the form `("GroupName", linenr, col_start, col_end)` or
        `("GroupName", linenr)` to highlight an entire line.
        By default existing highlights are preserved. Specify a line range with
        clear_start and clear_end to replace highlights in this range. As a
        shorthand, use clear=True to clear the entire buffer before adding the
        new highlights.
        """
        # clear_start defaults to 0, so this only fires when the caller
        # explicitly passed clear_start=None together with clear=True.
        if clear and clear_start is None:
            clear_start = 0
        # Batching is delegated to a private Lua helper on the session.
        lua = self._session._get_lua_private()
        lua.update_highlights(self, src_id, hls, clear_start, clear_end,
                              async_=async_)
    @property
    def name(self):
        """Get the buffer name."""
        return self.request('nvim_buf_get_name')
    @name.setter
    def name(self, value):
        """Set the buffer name. BufFilePre/BufFilePost are triggered."""
        return self.request('nvim_buf_set_name', value)
    @property
    def valid(self):
        """Return True if the buffer still exists."""
        return self.request('nvim_buf_is_valid')
    @property
    def number(self):
        """Get the buffer number."""
        # The msgpack-RPC handle doubles as the buffer number.
        return self.handle
class Range(object):
    """A view over a contiguous, inclusive span of Buffer lines.

    ``start``/``end`` arrive as 1-based line numbers and are stored as
    0-based indices.
    """

    def __init__(self, buffer, start, end):
        self._buffer = buffer
        self.start = start - 1
        self.end = end - 1

    def __len__(self):
        # Inclusive endpoints, hence the +1.
        return self.end - self.start + 1

    def __getitem__(self, idx):
        """Return one line, or a list of lines for a slice."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end + 1 if hi is None else hi
            return self._buffer[lo:hi]
        return self._buffer[self._normalize_index(idx)]

    def __setitem__(self, idx, lines):
        """Replace one line, or a slice of lines, within the range."""
        if not isinstance(idx, slice):
            self._buffer[self._normalize_index(idx)] = lines
            return
        lo = self._normalize_index(idx.start)
        hi = self._normalize_index(idx.stop)
        lo = self.start if lo is None else lo
        hi = self.end if hi is None else hi
        # Unlike __getitem__, the normalized stop is treated inclusively.
        self._buffer[lo:hi + 1] = lines

    def __iter__(self):
        """Yield each line covered by the range."""
        for line_num in range(self.start, self.end + 1):
            yield self._buffer[line_num]

    def append(self, lines, i=None):
        """Append lines to the underlying buffer at position ``i``."""
        pos = self._normalize_index(i)
        self._buffer.append(lines, self.end + 1 if pos is None else pos)

    def _normalize_index(self, index):
        """Map a range-relative index to a buffer index, clamped to the range."""
        if index is None:
            return None
        pos = self.end if index < 0 else index + self.start
        return min(pos, self.end)
| {
"repo_name": "Shougo/python-client",
"path": "pynvim/api/buffer.py",
"copies": "2",
"size": "7205",
"license": "apache-2.0",
"hash": 6590644328629005000,
"line_mean": 32.202764977,
"line_max": 79,
"alpha_frac": 0.572796669,
"autogenerated": false,
"ratio": 3.9916897506925206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.556448641969252,
"avg_score": null,
"num_lines": null
} |
"""API for working with Nvim buffers."""
from .common import Remote
from ..compat import IS_PYTHON3
# Public API of this module. The trailing comma is required: without it
# ('Buffer') is just a parenthesized string, and `from ... import *`
# would iterate its characters and fail.
__all__ = ('Buffer',)
if IS_PYTHON3:
    basestring = str
class Buffer(Remote):
    """A remote Nvim buffer.

    Uses the legacy ``buffer_*`` RPC method names. Some calls go through
    ``self.request`` (Remote helper) and some directly through
    ``self._session.request`` with the buffer passed explicitly.
    """
    # Prefix used by the Remote base when resolving short API method names.
    _api_prefix = "buffer_"
    def __len__(self):
        """Return the number of lines contained in a Buffer."""
        return self.request('buffer_line_count')
    def __getitem__(self, idx):
        """Get a buffer line or slice by integer index.
        Indexes may be negative to specify positions from the end of the
        buffer. For example, -1 is the last line, -2 is the line before that
        and so on.
        When retrieving slices, omitting indexes (eg: `buffer[:]`) will bring
        the whole buffer.
        """
        if not isinstance(idx, slice):
            return self._session.request('buffer_get_line', self, idx)
        include_end = False
        start = idx.start
        end = idx.stop
        if start is None:
            start = 0
        if end is None:
            # Open-ended slice: -1 plus include_end covers the last line.
            end = -1
            include_end = True
        return self._session.request('buffer_get_line_slice', self, start, end,
                                     True, include_end)
    def __setitem__(self, idx, lines):
        """Replace a buffer line or slice by integer index.
        Like with `__getitem__`, indexes may be negative.
        When replacing slices, omitting indexes (eg: `buffer[:]`) will replace
        the whole buffer.
        """
        if not isinstance(idx, slice):
            # ``None`` deletes the line rather than replacing it.
            if lines is None:
                return self._session.request('buffer_del_line', self, idx)
            else:
                return self._session.request('buffer_set_line', self, idx,
                                             lines)
        if lines is None:
            lines = []
        include_end = False
        start = idx.start
        end = idx.stop
        if start is None:
            start = 0
        if end is None:
            end = -1
            include_end = True
        return self._session.request('buffer_set_line_slice', self, start, end,
                                     True, include_end, lines)
    def __iter__(self):
        """Iterate lines of a buffer.
        This will retrieve all lines locally before iteration starts. This
        approach is used because for most cases, the gain is much greater by
        minimizing the number of API calls by transfering all data needed to
        work.
        """
        lines = self[:]
        for line in lines:
            yield line
    def __delitem__(self, idx):
        """Delete line or slice of lines from the buffer.
        This is the same as __setitem__(idx, [])
        """
        if not isinstance(idx, slice):
            self.__setitem__(idx, None)
        else:
            self.__setitem__(idx, [])
    def get_line_slice(self, start, stop, start_incl, end_incl):
        """More flexible wrapper for retrieving slices."""
        return self._session.request('buffer_get_line_slice', self, start,
                                     stop, start_incl, end_incl)
    def set_line_slice(self, start, stop, start_incl, end_incl, lines):
        """More flexible wrapper for replacing slices."""
        return self._session.request('buffer_set_line_slice', self, start,
                                     stop, start_incl, end_incl, lines)
    def append(self, lines, index=-1):
        """Append a string or list of lines to the buffer."""
        if isinstance(lines, basestring):
            lines = [lines]
        return self._session.request('buffer_insert', self, index, lines)
    def mark(self, name):
        """Return (row, col) tuple for a named mark."""
        return self.request('buffer_get_mark', name)
    def range(self, start, end):
        """Return a `Range` object, which represents part of the Buffer."""
        return Range(self, start, end)
    def add_highlight(self, hl_group, line, col_start=0,
                      col_end=-1, src_id=-1, async=None):
        """Add a highlight to the buffer.

        NOTE(review): ``async`` is a reserved keyword from Python 3.7 on;
        this code only parses on older interpreters.
        """
        if async is None:
            # Default to an asynchronous notification unless src_id is 0.
            async = (src_id != 0)
        return self.request('buffer_add_highlight', src_id, hl_group,
                            line, col_start, col_end, async=async)
    def clear_highlight(self, src_id, line_start=0, line_end=-1, async=True):
        """Clear highlights from the buffer."""
        self.request('buffer_clear_highlight', src_id,
                     line_start, line_end, async=async)
    @property
    def name(self):
        """Get the buffer name."""
        return self.request('buffer_get_name')
    @name.setter
    def name(self, value):
        """Set the buffer name. BufFilePre/BufFilePost are triggered."""
        return self.request('buffer_set_name', value)
    @property
    def valid(self):
        """Return True if the buffer still exists."""
        return self.request('buffer_is_valid')
    @property
    def number(self):
        """Get the buffer number."""
        return self.request('buffer_get_number')
class Range(object):
    """Inclusive view over part of a Buffer.

    Constructed with 1-based ``start``/``end`` line numbers; both are
    stored as 0-based indices and the range includes both endpoints.
    """

    def __init__(self, buffer, start, end):
        self._buffer = buffer
        self.start = start - 1
        self.end = end - 1

    def __len__(self):
        return self.end - self.start + 1

    def _resolve_slice(self, idx):
        # Shared slice-bound resolution for __getitem__/__setitem__.
        lo = self._normalize_index(idx.start)
        hi = self._normalize_index(idx.stop)
        if lo is None:
            lo = self.start
        if hi is None:
            hi = self.end + 1
        return lo, hi

    def __getitem__(self, idx):
        """Return one line, or a list of lines for a slice."""
        if isinstance(idx, slice):
            lo, hi = self._resolve_slice(idx)
            return self._buffer[lo:hi]
        return self._buffer[self._normalize_index(idx)]

    def __setitem__(self, idx, lines):
        """Replace one line, or a slice of lines, within the range."""
        if isinstance(idx, slice):
            lo, hi = self._resolve_slice(idx)
            self._buffer[lo:hi] = lines
        else:
            self._buffer[self._normalize_index(idx)] = lines

    def __iter__(self):
        """Yield each line covered by the range."""
        for pos in range(self.start, self.end + 1):
            yield self._buffer[pos]

    def append(self, lines, i=None):
        """Append lines to the underlying buffer at position ``i``."""
        target = self._normalize_index(i)
        if target is None:
            target = self.end + 1
        self._buffer.append(lines, target)

    def _normalize_index(self, index):
        """Map a range-relative index to a buffer index, clamped to the range."""
        if index is None:
            return None
        pos = self.end if index < 0 else index + self.start
        return pos if pos <= self.end else self.end
| {
"repo_name": "timeyyy/python-client",
"path": "neovim/api/buffer.py",
"copies": "1",
"size": "6609",
"license": "apache-2.0",
"hash": -2438271176882659300,
"line_mean": 31.3970588235,
"line_max": 79,
"alpha_frac": 0.548645786,
"autogenerated": false,
"ratio": 4.2041984732824424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252844259282442,
"avg_score": null,
"num_lines": null
} |
"""API for working with Nvim buffers."""
from .common import Remote, RemoteMap
from ..compat import IS_PYTHON3
# Public API of this module. The trailing comma is required: without it
# ('Buffer') is just a parenthesized string, and `from ... import *`
# would iterate its characters and fail.
__all__ = ('Buffer',)
if IS_PYTHON3:
    basestring = str
class Buffer(Remote):
    """A remote Nvim buffer.

    Oldest variant of this class: all RPC calls go through
    ``self._session.request`` with the buffer passed explicitly, and
    ``vars``/``options`` are exposed as RemoteMap views.
    """
    def __init__(self, session, code_data):
        """Initialize from session and code_data immutable object.
        The `code_data` contains serialization information required for
        msgpack-rpc calls. It must be immutable for Buffer equality to work.
        """
        self._session = session
        self.code_data = code_data
        # Dict-like views over buffer-scoped variables and options.
        self.vars = RemoteMap(session, 'buffer_get_var', 'buffer_set_var',
                              self)
        self.options = RemoteMap(session, 'buffer_get_option',
                                 'buffer_set_option', self)
    def __len__(self):
        """Return the number of lines contained in a Buffer."""
        return self._session.request('buffer_line_count', self)
    def __getitem__(self, idx):
        """Get a buffer line or slice by integer index.
        Indexes may be negative to specify positions from the end of the
        buffer. For example, -1 is the last line, -2 is the line before that
        and so on.
        When retrieving slices, omitting indexes (eg: `buffer[:]`) will bring
        the whole buffer.
        """
        if not isinstance(idx, slice):
            return self._session.request('buffer_get_line', self, idx)
        include_end = False
        start = idx.start
        end = idx.stop
        if start is None:
            start = 0
        if end is None:
            # Open-ended slice: -1 plus include_end covers the last line.
            end = -1
            include_end = True
        return self._session.request('buffer_get_line_slice', self, start, end,
                                     True, include_end)
    def __setitem__(self, idx, lines):
        """Replace a buffer line or slice by integer index.
        Like with `__getitem__`, indexes may be negative.
        When replacing slices, omitting indexes (eg: `buffer[:]`) will replace
        the whole buffer.
        """
        if not isinstance(idx, slice):
            # ``None`` deletes the line rather than replacing it.
            if lines is None:
                return self._session.request('buffer_del_line', self, idx)
            else:
                return self._session.request('buffer_set_line', self, idx,
                                             lines)
        if lines is None:
            lines = []
        include_end = False
        start = idx.start
        end = idx.stop
        if start is None:
            start = 0
        if end is None:
            end = -1
            include_end = True
        return self._session.request('buffer_set_line_slice', self, start, end,
                                     True, include_end, lines)
    def __iter__(self):
        """Iterate lines of a buffer.
        This will retrieve all lines locally before iteration starts. This
        approach is used because for most cases, the gain is much greater by
        minimizing the number of API calls by transfering all data needed to
        work.
        """
        lines = self[:]
        for line in lines:
            yield line
    def __delitem__(self, idx):
        """Delete line or slice of lines from the buffer.
        This is the same as __setitem__(idx, [])
        """
        if not isinstance(idx, slice):
            self.__setitem__(idx, None)
        else:
            self.__setitem__(idx, [])
    def get_line_slice(self, start, stop, start_incl, end_incl):
        """More flexible wrapper for retrieving slices."""
        return self._session.request('buffer_get_line_slice', self, start,
                                     stop, start_incl, end_incl)
    def set_line_slice(self, start, stop, start_incl, end_incl, lines):
        """More flexible wrapper for replacing slices."""
        return self._session.request('buffer_set_line_slice', self, start,
                                     stop, start_incl, end_incl, lines)
    def append(self, lines, index=-1):
        """Append a string or list of lines to the buffer."""
        if isinstance(lines, basestring):
            lines = [lines]
        return self._session.request('buffer_insert', self, index, lines)
    def mark(self, name):
        """Return (row, col) tuple for a named mark."""
        return self._session.request('buffer_get_mark', self, name)
    def range(self, start, end):
        """Return a `Range` object, which represents part of the Buffer."""
        return Range(self, start, end)
    @property
    def name(self):
        """Get the buffer name."""
        return self._session.request('buffer_get_name', self)
    @name.setter
    def name(self, value):
        """Set the buffer name. BufFilePre/BufFilePost are triggered."""
        return self._session.request('buffer_set_name', self, value)
    @property
    def valid(self):
        """Return True if the buffer still exists."""
        return self._session.request('buffer_is_valid', self)
    @property
    def number(self):
        """Get the buffer number."""
        return self._session.request('buffer_get_number', self)
class Range(object):
    """Half-open view over part of a Buffer.

    ``start`` arrives 1-based and is stored 0-based; ``end`` is stored
    as given and treated as an exclusive bound.
    """

    def __init__(self, buffer, start, end):
        self._buffer = buffer
        self.start = start - 1
        self.end = end

    def __len__(self):
        # Exclusive end, so no +1 adjustment is needed.
        return self.end - self.start

    def __getitem__(self, idx):
        """Return one line, or a list of lines for a slice."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end if hi is None else hi
            return self._buffer[lo:hi]
        return self._buffer[self._normalize_index(idx)]

    def __setitem__(self, idx, lines):
        """Replace one line, or a slice of lines, within the range."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end if hi is None else hi
            self._buffer[lo:hi] = lines
        else:
            self._buffer[self._normalize_index(idx)] = lines

    def __iter__(self):
        """Yield each line covered by the range."""
        for pos in range(self.start, self.end):
            yield self._buffer[pos]

    def append(self, lines, i=None):
        """Append lines to the underlying buffer at position ``i``."""
        target = self._normalize_index(i)
        self._buffer.append(lines, self.end if target is None else target)

    def _normalize_index(self, index):
        """Map a range-relative index to a buffer index, clamped to the range."""
        if index is None:
            return None
        # Negative indexes map to the last line of the range; positive ones
        # are offset by the range start and clamped below the exclusive end.
        pos = self.end - 1 if index < 0 else index + self.start
        return min(pos, self.end - 1)
| {
"repo_name": "0x90sled/python-client",
"path": "neovim/api/buffer.py",
"copies": "4",
"size": "6645",
"license": "apache-2.0",
"hash": 624168714945834000,
"line_mean": 31.896039604,
"line_max": 79,
"alpha_frac": 0.5550037622,
"autogenerated": false,
"ratio": 4.300970873786408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 202
} |
"""API for working with Nvim tabpages."""
from .common import Remote, RemoteMap, RemoteSequence
# Public API of this module. The trailing comma is required: without it
# ('Tabpage') is just a parenthesized string, and `from ... import *`
# would iterate its characters and fail.
__all__ = ('Tabpage',)
class Tabpage(Remote):
    """A remote Nvim tabpage."""
    def __init__(self, session, code_data):
        """Initialize from session and code_data immutable object.
        The `code_data` contains serialization information required for
        msgpack-rpc calls. It must be immutable for Tabpage equality to work.
        """
        self._session = session
        self.code_data = code_data
        # Sequence-like view of the windows on this tabpage.
        self.windows = RemoteSequence(session, 'tabpage_get_windows', self)
        # Dict-like view over tabpage-scoped variables.
        self.vars = RemoteMap(session, 'tabpage_get_var', 'tabpage_set_var',
                              self)
    @property
    def window(self):
        """Get the `Window` currently focused on the tabpage."""
        return self._session.request('tabpage_get_window', self)
    @property
    def valid(self):
        """Return True if the tabpage still exists."""
        return self._session.request('tabpage_is_valid', self)
| {
"repo_name": "starcraftman/python-client",
"path": "neovim/api/tabpage.py",
"copies": "5",
"size": "1031",
"license": "apache-2.0",
"hash": 6166943724153261000,
"line_mean": 31.21875,
"line_max": 77,
"alpha_frac": 0.6285160039,
"autogenerated": false,
"ratio": 4.124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 32
} |
"""API for working with Nvim windows."""
from .common import Remote, RemoteMap
# Public API of this module. The trailing comma is required: without it
# ('Window') is just a parenthesized string, and `from ... import *`
# would iterate its characters and fail.
__all__ = ('Window',)
class Window(Remote):
    """A remote Nvim window."""
    def __init__(self, session, code_data):
        """Initialize from session and code_data immutable object.
        The `code_data` contains serialization information required for
        msgpack-rpc calls. It must be immutable for Window equality to work.
        """
        self._session = session
        self.code_data = code_data
        # Dict-like views over window-scoped variables and options.
        self.vars = RemoteMap(session, 'window_get_var', 'window_set_var',
                              self)
        self.options = RemoteMap(session, 'window_get_option',
                                 'window_set_option', self)
    @property
    def buffer(self):
        """Get the `Buffer` currently being displayed by the window."""
        return self._session.request('window_get_buffer', self)
    @property
    def cursor(self):
        """Get the (row, col) tuple with the current cursor position."""
        return self._session.request('window_get_cursor', self)
    @cursor.setter
    def cursor(self, pos):
        """Set the (row, col) tuple as the new cursor position."""
        return self._session.request('window_set_cursor', self, pos)
    @property
    def height(self):
        """Get the window height in rows."""
        return self._session.request('window_get_height', self)
    @height.setter
    def height(self, height):
        """Set the window height in rows."""
        return self._session.request('window_set_height', self, height)
    @property
    def width(self):
        """Get the window width in columns."""
        return self._session.request('window_get_width', self)
    @width.setter
    def width(self, width):
        """Set the window width in columns."""
        return self._session.request('window_set_width', self, width)
    @property
    def row(self):
        """0-indexed, on-screen window position(row) in display cells."""
        return self._session.request('window_get_position', self)[0]
    @property
    def col(self):
        """0-indexed, on-screen window position(col) in display cells."""
        return self._session.request('window_get_position', self)[1]
    @property
    def tabpage(self):
        """Get the `Tabpage` that contains the window."""
        return self._session.request('window_get_tabpage', self)
    @property
    def valid(self):
        """Return True if the window still exists."""
        return self._session.request('window_is_valid', self)
| {
"repo_name": "starcraftman/python-client",
"path": "neovim/api/window.py",
"copies": "5",
"size": "2549",
"license": "apache-2.0",
"hash": 2235249087990921500,
"line_mean": 31.6794871795,
"line_max": 76,
"alpha_frac": 0.6112200863,
"autogenerated": false,
"ratio": 4.192434210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
"""API for xbox bound to Home Assistant OAuth."""
from aiohttp import ClientSession
from xbox.webapi.authentication.manager import AuthenticationManager
from xbox.webapi.authentication.models import OAuth2TokenResponse
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.util.dt import utc_from_timestamp
class AsyncConfigEntryAuth(AuthenticationManager):
    """Provide xbox authentication tied to an OAuth2 based config entry."""
    def __init__(
        self,
        websession: ClientSession,
        oauth_session: config_entry_oauth2_flow.OAuth2Session,
    ):
        """Initialize xbox auth.

        :param websession: aiohttp session used by the xbox library.
        :param oauth_session: Home Assistant OAuth2 session that owns the
            actual token refresh lifecycle.
        """
        # Leaving out client credentials as they are handled by Home Assistant
        super().__init__(websession, "", "", "")
        self._oauth_session = oauth_session
        # Seed the xbox library with the token HA currently holds.
        self.oauth = self._get_oauth_token()
    async def refresh_tokens(self) -> None:
        """Return a valid access token."""
        if not self._oauth_session.valid_token:
            # Let Home Assistant refresh the OAuth token, then mirror it.
            await self._oauth_session.async_ensure_token_valid()
            self.oauth = self._get_oauth_token()
        # This will skip the OAuth refresh and only refresh User and XSTS tokens
        await super().refresh_tokens()
    def _get_oauth_token(self) -> OAuth2TokenResponse:
        """Convert HA's stored token dict into an OAuth2TokenResponse."""
        tokens = {**self._oauth_session.token}
        # Reconstruct the issue time, which the xbox model expects, from
        # HA's absolute expiry and the token lifetime.
        issued = tokens["expires_at"] - tokens["expires_in"]
        # The xbox model does not accept the extra 'expires_at' field.
        del tokens["expires_at"]
        token_response = OAuth2TokenResponse.parse_obj(tokens)
        token_response.issued = utc_from_timestamp(issued)
        return token_response
| {
"repo_name": "GenericStudent/home-assistant",
"path": "homeassistant/components/xbox/api.py",
"copies": "12",
"size": "1572",
"license": "apache-2.0",
"hash": 1841012023460980000,
"line_mean": 39.3076923077,
"line_max": 80,
"alpha_frac": 0.6806615776,
"autogenerated": false,
"ratio": 4.214477211796247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""API functionality for integrating with NovoEd"""
from urllib.parse import urljoin
import logging
import operator
import requests
from django.conf import settings
from djangosaml2idp.processors import BaseProcessor
from rest_framework import status
from klasses.models import BootcampRunEnrollment
from main.utils import now_in_utc
from novoed.constants import (
REGISTER_USER_URL_STUB,
UNENROLL_USER_URL_STUB,
SAML_ID_STAGING_PREFIX,
)
from profiles.api import get_first_and_last_names
log = logging.getLogger(__name__)
def enroll_in_novoed_course(user, novoed_course_stub):
    """
    Enrolls a user in a course on NovoEd
    Args:
        user (django.contrib.auth.models.User):
        novoed_course_stub (str): The stub of the course in NovoEd (can be found in the NovoEd course's URL)
    Returns:
        (bool, bool): A flag indicating whether or not the enrollment succeeded, paired with a flag indicating
            whether or not the enrollment already existed
    Raises:
        HTTPError: Raised if the HTTP response indicates an error
    """
    first_name, last_name = get_first_and_last_names(user)
    new_user_req_body = {
        "api_key": settings.NOVOED_API_KEY,
        "api_secret": settings.NOVOED_API_SECRET,
        "catalog_id": novoed_course_stub,
        "first_name": first_name,
        "last_name": last_name,
        "email": user.email,
        # NovoEd correlates users by this id; the local PK is used.
        "external_id": str(user.id),
    }
    new_user_url = urljoin(
        settings.NOVOED_API_BASE_URL, f"{novoed_course_stub}/{REGISTER_USER_URL_STUB}"
    )
    resp = requests.post(new_user_url, json=new_user_req_body)
    created, existed = False, False
    if resp.status_code == status.HTTP_200_OK:
        created = True
    elif resp.status_code == status.HTTP_207_MULTI_STATUS:
        # 207 is treated as "user was already enrolled" -- presumably
        # NovoEd's convention; confirm against their API docs.
        existed = True
    elif resp.ok:
        # Any other 2xx is unexpected: log it but do not raise.
        log.error(
            "Received an unexpected response from NovoEd when enrolling (%s, %s)",
            user.email,
            novoed_course_stub,
        )
    else:
        resp.raise_for_status()
    # Update the 'novoed_sync_date' value for the enrollment that matches this user/run, as long as we got a response
    # that indicated the enrollment exists in NovoEd, and the existing sync date is None
    if created or existed:
        BootcampRunEnrollment.objects.filter(
            user=user,
            bootcamp_run__novoed_course_stub=novoed_course_stub,
            novoed_sync_date=None,
        ).update(novoed_sync_date=now_in_utc())
    return created, existed
def unenroll_from_novoed_course(user, novoed_course_stub):
    """
    Unenrolls a user from a course on NovoEd
    Args:
        user (django.contrib.auth.models.User):
        novoed_course_stub (str): The stub of the course in NovoEd (can be found in the NovoEd course's URL)
    Raises:
        HTTPError: Raised if the HTTP response indicates an error
    """
    # NovoEd identifies the user to unenroll by email only.
    unenroll_user_req_body = {
        "api_key": settings.NOVOED_API_KEY,
        "api_secret": settings.NOVOED_API_SECRET,
        "email": user.email,
    }
    unenroll_user_url = urljoin(
        settings.NOVOED_API_BASE_URL, f"{novoed_course_stub}/{UNENROLL_USER_URL_STUB}"
    )
    resp = requests.post(unenroll_user_url, json=unenroll_user_req_body)
    resp.raise_for_status()
class NovoEdSamlProcessor(BaseProcessor):
    """
    SAML request processor that overrides the default to allow for specialized functionality
    when responding to auth requests.
    """
    def create_identity(self, user, sp_attribute_mapping):
        """Build the SAML attribute dict for ``user``.

        :param user: the authenticated user object.
        :param sp_attribute_mapping: maps user attribute paths to the
            outgoing SAML attribute names.
        :return: dict of SAML attribute name -> value.
        """
        results = {}
        for user_attr, out_attr in sp_attribute_mapping.items():
            # This line allows the attribute map for a ServiceProvider record to have keys that refer to
            # an object path (e.g.: "profile.name") rather than just a property name
            attr = operator.attrgetter(user_attr)(user)
            if attr is not None:
                # Support mapped attributes that are methods as well as values.
                attr_value = attr() if callable(attr) else attr
                # Outside production, prefix the user id so staging SAML
                # assertions never collide with production ids on NovoEd.
                if user_attr == "id" and settings.ENVIRONMENT not in {
                    "prod",
                    "production",
                }:
                    attr_value = "".join([SAML_ID_STAGING_PREFIX, str(attr_value)])
                results[out_attr] = attr_value
        return results
| {
"repo_name": "mitodl/bootcamp-ecommerce",
"path": "novoed/api.py",
"copies": "1",
"size": "4260",
"license": "bsd-3-clause",
"hash": 6557026964378166000,
"line_mean": 34.7983193277,
"line_max": 117,
"alpha_frac": 0.6455399061,
"autogenerated": false,
"ratio": 3.6724137931034484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48179536992034483,
"avg_score": null,
"num_lines": null
} |
'''API functions for adding data to CKAN.'''
import logging
import random
import re
from pylons import config
import paste.deploy.converters
import ckan.lib.plugins as lib_plugins
import ckan.logic as logic
import ckan.rating as ratings
import ckan.plugins as plugins
import ckan.lib.dictization
import ckan.logic.action
import ckan.logic.schema
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.dictization.model_save as model_save
import ckan.lib.navl.dictization_functions
import ckan.lib.uploader as uploader
import ckan.lib.navl.validators as validators
import ckan.lib.mailer as mailer
from ckan.common import _
# FIXME this looks nasty and should be shared better
from ckan.logic.action.update import _update_package_relationship
log = logging.getLogger(__name__)
# Define some shortcuts
# Ensure they are module-private so that they don't get loaded as available
# actions in the action API.
# Module-private aliases (underscore-prefixed) so the action API loader
# does not pick these up as actions; exception classes keep their public
# names so callers can catch them from this module.
_validate = ckan.lib.navl.dictization_functions.validate
_check_access = logic.check_access
_get_action = logic.get_action
ValidationError = logic.ValidationError
NotFound = logic.NotFound
_get_or_bust = logic.get_or_bust
def package_create(context, data_dict):
    '''Create a new dataset (package).
    You must be authorized to create new datasets. If you specify any groups
    for the new dataset, you must also be authorized to edit these groups.
    Plugins may change the parameters of this function depending on the value
    of the ``type`` parameter, see the ``IDatasetForm`` plugin interface.
    :param name: the name of the new dataset, must be between 2 and 100
        characters long and contain only lowercase alphanumeric characters,
        ``-`` and ``_``, e.g. ``'warandpeace'``
    :type name: string
    :param title: the title of the dataset (optional, default: same as
        ``name``)
    :type title: string
    :param author: the name of the dataset's author (optional)
    :type author: string
    :param author_email: the email address of the dataset's author (optional)
    :type author_email: string
    :param maintainer: the name of the dataset's maintainer (optional)
    :type maintainer: string
    :param maintainer_email: the email address of the dataset's maintainer
        (optional)
    :type maintainer_email: string
    :param license_id: the id of the dataset's license, see ``license_list()``
        for available values (optional)
    :type license_id: license id string
    :param notes: a description of the dataset (optional)
    :type notes: string
    :param url: a URL for the dataset's source (optional)
    :type url: string
    :param version: (optional)
    :type version: string, no longer than 100 characters
    :param state: the current state of the dataset, e.g. ``'active'`` or
        ``'deleted'``, only active datasets show up in search results and
        other lists of datasets, this parameter will be ignored if you are not
        authorized to change the state of the dataset (optional, default:
        ``'active'``)
    :type state: string
    :param type: the type of the dataset (optional), ``IDatasetForm`` plugins
        associate themselves with different dataset types and provide custom
        dataset handling behaviour for these types
    :type type: string
    :param resources: the dataset's resources, see ``resource_create()``
        for the format of resource dictionaries (optional)
    :type resources: list of resource dictionaries
    :param tags: the dataset's tags, see ``tag_create()`` for the format
        of tag dictionaries (optional)
    :type tags: list of tag dictionaries
    :param extras: the dataset's extras (optional), extras are arbitrary
        (key: value) metadata items that can be added to datasets, each extra
        dictionary should have keys ``'key'`` (a string), ``'value'`` (a
        string)
    :type extras: list of dataset extra dictionaries
    :param relationships_as_object: see ``package_relationship_create()`` for
        the format of relationship dictionaries (optional)
    :type relationships_as_object: list of relationship dictionaries
    :param relationships_as_subject: see ``package_relationship_create()`` for
        the format of relationship dictionaries (optional)
    :type relationships_as_subject: list of relationship dictionaries
    :param groups: the groups to which the dataset belongs (optional), each
        group dictionary should have one or more of the following keys which
        identify an existing group:
        ``'id'`` (the id of the group, string), ``'name'`` (the name of the
        group, string), ``'title'`` (the title of the group, string), to see
        which groups exist call ``group_list()``
    :type groups: list of dictionaries
    :param owner_org: the id of the dataset's owning organization, see
        ``organization_list()`` or ``organization_list_for_user`` for
        available values (optional)
    :type owner_org: string
    :returns: the newly created dataset (unless 'return_id_only' is set to True
        in the context, in which case just the dataset id will be returned)
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    package_type = data_dict.get('type')
    # IDatasetForm plugins may supply a custom validation schema for this
    # dataset type; an explicit schema in the context overrides it.
    package_plugin = lib_plugins.lookup_package_plugin(package_type)
    if 'schema' in context:
        schema = context['schema']
    else:
        schema = package_plugin.create_package_schema()
    _check_access('package_create', context, data_dict)
    if 'api_version' not in context:
        # check_data_dict() is deprecated. If the package_plugin has a
        # check_data_dict() we'll call it, if it doesn't have the method we'll
        # do nothing.
        check_data_dict = getattr(package_plugin, 'check_data_dict', None)
        if check_data_dict:
            try:
                check_data_dict(data_dict, schema)
            except TypeError:
                # Old plugins do not support passing the schema so we need
                # to ensure they still work
                package_plugin.check_data_dict(data_dict)
    data, errors = _validate(data_dict, schema, context)
    log.debug('package_create validate_errs=%r user=%s package=%s data=%r',
              errors, context.get('user'),
              data.get('name'), data_dict)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    # Open a new revision so the create is recorded in revision history.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create object %s') % data.get("name")
    admins = []
    if user:
        user_obj = model.User.by_name(user.decode('utf8'))
        if user_obj:
            admins = [user_obj]
            data['creator_user_id'] = user_obj.id
    pkg = model_save.package_dict_save(data, context)
    model.setup_default_user_roles(pkg, admins)
    # Needed to let extensions know the package id
    model.Session.flush()
    data['id'] = pkg.id
    # Assign the owning organization with auth disabled (the user may not
    # otherwise be allowed to change it) and without committing yet.
    context_org_update = context.copy()
    context_org_update['ignore_auth'] = True
    context_org_update['defer_commit'] = True
    _get_action('package_owner_org_update')(context_org_update,
                                            {'id': pkg.id,
                                             'organization_id': pkg.owner_org})
    for item in plugins.PluginImplementations(plugins.IPackageController):
        item.create(pkg)
        item.after_create(context, data)
    if not context.get('defer_commit'):
        model.repo.commit()
    ## need to let rest api create
    context["package"] = pkg
    ## this is added so that the rest controller can make a new location
    context["id"] = pkg.id
    log.debug('Created object %s' % pkg.name)
    # Make sure that a user provided schema is not used on package_show
    context.pop('schema', None)
    return_id_only = context.get('return_id_only', False)
    output = context['id'] if return_id_only \
        else _get_action('package_show')(context, {'id':context['id']})
    return output
def resource_create(context, data_dict):
    '''Appends a new resource to a datasets list of resources.
    :param package_id: id of package that the resource should be added to.
    :type package_id: string
    :param url: url of resource
    :type url: string
    :param revision_id: (optional)
    :type revision_id: string
    :param description: (optional)
    :type description: string
    :param format: (optional)
    :type format: string
    :param hash: (optional)
    :type hash: string
    :param name: (optional)
    :type name: string
    :param resource_type: (optional)
    :type resource_type: string
    :param mimetype: (optional)
    :type mimetype: string
    :param mimetype_inner: (optional)
    :type mimetype_inner: string
    :param webstore_url: (optional)
    :type webstore_url: string
    :param cache_url: (optional)
    :type cache_url: string
    :param size: (optional)
    :type size: int
    :param created: (optional)
    :type created: iso date string
    :param last_modified: (optional)
    :type last_modified: iso date string
    :param cache_last_updated: (optional)
    :type cache_last_updated: iso date string
    :param webstore_last_updated: (optional)
    :type webstore_last_updated: iso date string
    :param upload: (optional)
    :type upload: FieldStorage (optional) needs multipart/form-data
    :returns: the newly created resource
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    package_id = _get_or_bust(data_dict, 'package_id')
    data_dict.pop('package_id')
    pkg_dict = _get_action('package_show')(context, {'id': package_id})
    _check_access('resource_create', context, data_dict)
    if not 'resources' in pkg_dict:
        pkg_dict['resources'] = []
    upload = uploader.ResourceUpload(data_dict)
    pkg_dict['resources'].append(data_dict)
    try:
        # Validation of the new resource happens inside package_update;
        # the commit is deferred so the file upload below can still abort.
        context['defer_commit'] = True
        context['use_cache'] = False
        _get_action('package_update')(context, pkg_dict)
        context.pop('defer_commit')
    except ValidationError, e:
        # Re-raise only the errors belonging to the resource we appended.
        errors = e.error_dict['resources'][-1]
        raise ValidationError(errors)
    ## Get out resource_id resource from model as it will not appear in
    ## package_show until after commit
    # NOTE(review): this fork assigns the upload's return value as an md5
    # digest and stores it on the resource below -- confirm that
    # ResourceUpload.upload actually returns the digest in this codebase.
    md5 = upload.upload(context['package'].resources[-1].id,
                        uploader.get_max_resource_size())
    model.repo.commit()
    ## Run package show again to get out actual last_resource
    pkg_dict = _get_action('package_show')(context, {'id': package_id})
    resource = pkg_dict['resources'][-1]
    #{#JOE#}
    # Persist the computed hash with a second package_update.
    resource['hash'] = md5
    _get_action('package_update')(context, pkg_dict)
    return resource
def related_create(context, data_dict):
    '''Add a new related item to a dataset.
    You must provide your API key in the Authorization header.
    :param title: the title of the related item
    :type title: string
    :param type: the type of the related item, e.g. ``'Application'``,
        ``'Idea'`` or ``'Visualisation'``
    :type type: string
    :param id: the id of the related item (optional)
    :type id: string
    :param description: the description of the related item (optional)
    :type description: string
    :param url: the URL to the related item (optional)
    :type url: string
    :param image_url: the URL to the image for the related item (optional)
    :type image_url: string
    :param dataset_id: the name or id of the dataset that the related item
        belongs to (optional)
    :type dataset_id: string
    :returns: the newly created related item
    :rtype: dictionary
    '''
    model = context['model']
    session = context['session']
    user = context['user']
    userobj = model.User.get(user)
    _check_access('related_create', context, data_dict)
    # The current user becomes the owner of the related item.
    data_dict["owner_id"] = userobj.id
    data, errors = _validate(data_dict,
                             ckan.logic.schema.default_related_schema(),
                             context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    related = model_save.related_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit_and_remove()
    dataset_dict = None
    if 'dataset_id' in data_dict:
        # Attach the related item to its dataset and persist immediately so
        # the association is visible when the activity is recorded below.
        dataset = model.Package.get(data_dict['dataset_id'])
        dataset.related.append( related )
        model.repo.commit_and_remove()
        dataset_dict = ckan.lib.dictization.table_dictize(dataset, context)
    session.flush()
    related_dict = model_dictize.related_dictize(related, context)
    # Record a 'new related item' entry in the activity stream.
    activity_dict = {
        'user_id': userobj.id,
        'object_id': related.id,
        'activity_type': 'new related item',
    }
    activity_dict['data'] = {
        'related': related_dict,
        'dataset': dataset_dict,
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,  # committed by session.commit() below
        'ignore_auth': True,
        'session': session
    }
    logic.get_action('activity_create')(activity_create_context,
                                        activity_dict)
    session.commit()
    # Expose the created object so e.g. the REST controller can use it.
    context["related"] = related
    context["id"] = related.id
    log.debug('Created object %s' % related.title)
    return related_dict
def package_relationship_create(context, data_dict):
    '''Create a relationship between two datasets (packages).
    You must be authorized to edit both the subject and the object datasets.
    :param subject: the id or name of the dataset that is the subject of the
        relationship
    :type subject: string
    :param object: the id or name of the dataset that is the object of the
        relationship
    :param type: the type of the relationship, one of ``'depends_on'``,
        ``'dependency_of'``, ``'derives_from'``, ``'has_derivation'``,
        ``'links_to'``, ``'linked_from'``, ``'child_of'`` or ``'parent_of'``
    :type type: string
    :param comment: a comment about the relationship (optional)
    :type comment: string
    :returns: the newly created package relationship
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    schema = context.get('schema') or ckan.logic.schema.default_create_relationship_schema()
    api = context.get('api_version')
    # API v2 references packages by id, v1 by name.
    ref_package_by = 'id' if api == 2 else 'name'
    id, id2, rel_type = _get_or_bust(data_dict, ['subject', 'object', 'type'])
    comment = data_dict.get('comment', u'')
    pkg1 = model.Package.get(id)
    pkg2 = model.Package.get(id2)
    if not pkg1:
        raise NotFound('Subject package %r was not found.' % id)
    if not pkg2:
        # BUGFIX: this previously *returned* the NotFound instance instead of
        # raising it, so a missing object package silently produced an
        # exception object as the action's result.
        raise NotFound('Object package %r was not found.' % id2)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)
    _check_access('package_relationship_create', context, data_dict)
    # If a relationship of this type already exists between the two packages,
    # just update its comment instead of creating a duplicate.
    existing_rels = pkg1.get_relationships_with(pkg2, rel_type)
    if existing_rels:
        return _update_package_relationship(existing_rels[0],
                                            comment, context)
    # Record the change in revision history.
    rev = model.repo.new_revision()
    rev.author = user
    rev.message = _(u'REST API: Create package relationship: %s %s %s') % (pkg1, rel_type, pkg2)
    rel = pkg1.add_relationship(rel_type, pkg2, comment=comment)
    if not context.get('defer_commit'):
        model.repo.commit_and_remove()
    context['relationship'] = rel
    relationship_dicts = rel.as_dict(ref_package_by=ref_package_by)
    return relationship_dicts
def member_create(context, data_dict=None):
    '''Make an object (e.g. a user, dataset or group) a member of a group.
    If the object is already a member of the group then the capacity of the
    membership will be updated.
    You must be authorized to edit the group.
    :param id: the id or name of the group to add the object to
    :type id: string
    :param object: the id or name of the object to add
    :type object: string
    :param object_type: the type of the object being added, e.g. ``'package'``
        or ``'user'``
    :type object_type: string
    :param capacity: the capacity of the membership
    :type capacity: string
    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    # NOTE(review): a revision is opened before the auth check below --
    # confirm this ordering is intentional.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create member object %s') % data_dict.get('name', '')
    group_id, obj_id, obj_type, capacity = _get_or_bust(data_dict, ['id', 'object', 'object_type', 'capacity'])
    group = model.Group.get(group_id)
    if not group:
        raise NotFound('Group was not found.')
    # Resolve the model class for the member type (e.g. 'package' -> Package).
    obj_class = ckan.logic.model_name_to_class(model, obj_type)
    obj = obj_class.get(obj_id)
    if not obj:
        raise NotFound('%s was not found.' % obj_type.title())
    _check_access('member_create', context, data_dict)
    # Look up existing, in case it exists
    member = model.Session.query(model.Member).\
            filter(model.Member.table_name == obj_type).\
            filter(model.Member.table_id == obj.id).\
            filter(model.Member.group_id == group.id).\
            filter(model.Member.state == 'active').first()
    if not member:
        member = model.Member(table_name = obj_type,
                              table_id = obj.id,
                              group_id = group.id,
                              state = 'active')
    # Set (or update, for an existing membership) the capacity.
    member.capacity = capacity
    model.Session.add(member)
    model.repo.commit()
    return model_dictize.member_dictize(member, context)
def _group_or_org_create(context, data_dict, is_org=False):
    '''Shared implementation behind ``group_create`` and
    ``organization_create``.

    Validates and saves the group/organization, notifies plugins, records an
    activity-stream entry and makes the creating user an admin member.

    :param is_org: True to create an organization, False to create a group
    :type is_org: boolean
    :returns: the newly created group or organization
    :rtype: dictionary
    '''
    model = context['model']
    user = context['user']
    session = context['session']
    data_dict['is_organization'] = is_org
    # Handle an uploaded group image, if one was supplied.
    upload = uploader.Upload('group')
    upload.update_data_dict(data_dict, 'image_url',
                            'image_upload', 'clear_upload')
    # get the schema
    group_plugin = lib_plugins.lookup_group_plugin(
        group_type=data_dict.get('type'))
    try:
        schema = group_plugin.form_to_db_schema_options({'type':'create',
                                                         'api':'api_version' in context,
                                                         'context': context})
    except AttributeError:
        # Plugin does not implement form_to_db_schema_options().
        schema = group_plugin.form_to_db_schema()
    if 'api_version' not in context:
        # old plugins do not support passing the schema so we need
        # to ensure they still work
        try:
            group_plugin.check_data_dict(data_dict, schema)
        except TypeError:
            group_plugin.check_data_dict(data_dict)
    data, errors = _validate(data_dict, schema, context)
    log.debug('group_create validate_errs=%r user=%s group=%s data_dict=%r',
              errors, context.get('user'), data_dict.get('name'), data_dict)
    if errors:
        session.rollback()
        raise ValidationError(errors)
    # Open a revision so the create is recorded in revision history.
    rev = model.repo.new_revision()
    rev.author = user
    if 'message' in context:
        rev.message = context['message']
    else:
        rev.message = _(u'REST API: Create object %s') % data.get("name")
    group = model_save.group_dict_save(data, context)
    if user:
        admins = [model.User.by_name(user.decode('utf8'))]
    else:
        admins = []
    model.setup_default_user_roles(group, admins)
    # Needed to let extensions know the group id
    session.flush()
    # Notify the matching plugin interface about the new object.
    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController
    for item in plugins.PluginImplementations(plugin_type):
        item.create(group)
    if is_org:
        activity_type = 'new organization'
    else:
        activity_type = 'new group'
    user_id = model.User.by_name(user.decode('utf8')).id
    activity_dict = {
        'user_id': user_id,
        'object_id': group.id,
        'activity_type': activity_type,
    }
    activity_dict['data'] = {
        'group': ckan.lib.dictization.table_dictize(group, context)
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,  # committed with the main transaction below
        'ignore_auth': True,
        'session': session
    }
    logic.get_action('activity_create')(activity_create_context,
                                        activity_dict)
    upload.upload(uploader.get_max_image_size())
    if not context.get('defer_commit'):
        model.repo.commit()
    context["group"] = group
    context["id"] = group.id
    # creator of group/org becomes an admin
    # this needs to be after the repo.commit or else revisions break
    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
        'capacity': 'admin',
    }
    member_create_context = {
        'model': model,
        'user': user,
        'ignore_auth': True,  # we are not a member of the group at this point
        'session': session
    }
    logic.get_action('member_create')(member_create_context, member_dict)
    log.debug('Created object %s' % group.name)
    return model_dictize.group_dictize(group, context)
def group_create(context, data_dict):
    '''Create a new group.

    You must be authorized to create groups. Plugins may change the
    parameters of this function depending on the value of the ``type``
    parameter, see the ``IGroupForm`` plugin interface.

    :param name: the name of the group, a string between 2 and 100 characters
        long, containing only lowercase alphanumeric characters, ``-`` and
        ``_``
    :type name: string
    :param id: the id of the group (optional)
    :type id: string
    :param title: the title of the group (optional)
    :type title: string
    :param description: the description of the group (optional)
    :type description: string
    :param image_url: the URL to an image to be displayed on the group's page
        (optional)
    :type image_url: string
    :param type: the type of the group (optional), ``IGroupForm`` plugins
        associate themselves with different group types and provide custom
        group handling behaviour for these types. Cannot be 'organization'.
    :type type: string
    :param state: the current state of the group, e.g. ``'active'`` or
        ``'deleted'``; only active groups show up in search results and other
        lists of groups. This parameter is ignored if you are not authorized
        to change the state of the group (optional, default: ``'active'``)
    :type state: string
    :param approval_status: (optional)
    :type approval_status: string
    :param extras: the group's extras (optional); arbitrary (key: value)
        metadata items, each a dictionary with keys ``'key'`` (a string),
        ``'value'`` (a string), and optionally ``'deleted'``
    :type extras: list of dataset extra dictionaries
    :param packages: the datasets (packages) that belong to the group, a list
        of dictionaries each with keys ``'name'`` (string, the id or name of
        the dataset) and optionally ``'title'`` (string, the title of the
        dataset)
    :type packages: list of dictionaries
    :param groups: the groups that belong to the group, a list of dictionaries
        each with key ``'name'`` (string, the id or name of the group) and
        optionally ``'capacity'`` (string, the capacity in which the group is
        a member of the group)
    :type groups: list of dictionaries
    :param users: the users that belong to the group, a list of dictionaries
        each with key ``'name'`` (string, the id or name of the user) and
        optionally ``'capacity'`` (string, the capacity in which the user is
        a member of the group)
    :type users: list of dictionaries
    :returns: the newly created group
    :rtype: dictionary
    '''
    # Thin wrapper: refuse organization-typed input, check authorization,
    # then delegate to the shared group/org implementation.
    requested_type = data_dict.get('type')
    if requested_type == 'organization':
        # FIXME better exception?
        raise Exception(_('Trying to create an organization as a group'))
    _check_access('group_create', context, data_dict)
    return _group_or_org_create(context, data_dict)
def organization_create(context, data_dict):
    '''Create a new organization.

    You must be authorized to create organizations. Plugins may change the
    parameters of this function depending on the value of the ``type``
    parameter, see the ``IGroupForm`` plugin interface.

    :param name: the name of the organization, a string between 2 and 100
        characters long, containing only lowercase alphanumeric characters,
        ``-`` and ``_``
    :type name: string
    :param id: the id of the organization (optional)
    :type id: string
    :param title: the title of the organization (optional)
    :type title: string
    :param description: the description of the organization (optional)
    :type description: string
    :param image_url: the URL to an image to be displayed on the
        organization's page (optional)
    :type image_url: string
    :param state: the current state of the organization, e.g. ``'active'`` or
        ``'deleted'``; only active organizations show up in search results
        and other lists of organizations. This parameter is ignored if you
        are not authorized to change the state of the organization (optional,
        default: ``'active'``)
    :type state: string
    :param approval_status: (optional)
    :type approval_status: string
    :param extras: the organization's extras (optional); arbitrary
        (key: value) metadata items, each a dictionary with keys ``'key'``
        (a string), ``'value'`` (a string), and optionally ``'deleted'``
    :type extras: list of dataset extra dictionaries
    :param packages: the datasets (packages) that belong to the organization,
        a list of dictionaries each with keys ``'name'`` (string, the id or
        name of the dataset) and optionally ``'title'`` (string, the title of
        the dataset)
    :type packages: list of dictionaries
    :param users: the users that belong to the organization, a list of
        dictionaries each with key ``'name'`` (string, the id or name of the
        user) and optionally ``'capacity'`` (string, the capacity in which
        the user is a member of the organization)
    :type users: list of dictionaries
    :returns: the newly created organization
    :rtype: dictionary
    '''
    # Thin wrapper: force the type to 'organization', check authorization,
    # then delegate to the shared group/org implementation.
    data_dict['type'] = 'organization'
    _check_access('organization_create', context, data_dict)
    return _group_or_org_create(context, data_dict, is_org=True)
@logic.auth_audit_exempt
def rating_create(context, data_dict):
    '''Rate a dataset (package).
    You must provide your API key in the Authorization header.
    :param package: the name or id of the dataset to rate
    :type package: string
    :param rating: the rating to give to the dataset, an integer between 1 and
        5
    :type rating: int
    :returns: a dictionary with two keys: ``'rating average'`` (the average
        rating of the dataset you rated) and ``'rating count'`` (the number of
        times the dataset has been rated)
    :rtype: dictionary
    '''
    model = context['model']
    user = context.get("user")
    package_ref = data_dict.get('package')
    rating = data_dict.get('rating')
    opts_err = None
    if not package_ref:
        opts_err = _('You must supply a package id or name (parameter "package").')
    elif not rating:
        opts_err = _('You must supply a rating (parameter "rating").')
    else:
        try:
            rating_int = int(rating)
        except ValueError:
            opts_err = _('Rating must be an integer value.')
        else:
            package = model.Package.get(package_ref)
            # BUGFIX: compare the converted integer, not the raw input. The
            # raw value is typically a string from the API, and in Python 2
            # a str always compares greater than an int, so out-of-range
            # string ratings silently bypassed this check.
            if rating_int < ratings.MIN_RATING or rating_int > ratings.MAX_RATING:
                opts_err = _('Rating must be between %i and %i.') % (ratings.MIN_RATING, ratings.MAX_RATING)
            elif not package:
                opts_err = _('Not found') + ': %r' % package_ref
    if opts_err:
        raise ValidationError(opts_err)
    user = model.User.by_name(user)
    ratings.set_rating(user, package, rating_int)
    # Re-fetch the package so the returned stats reflect the new rating.
    package = model.Package.get(package_ref)
    ret_dict = {'rating average':package.get_average_rating(),
                'rating count': len(package.ratings)}
    return ret_dict
def user_create(context, data_dict):
    '''Create a new user.
    You must be authorized to create users.
    :param name: the name of the new user, a string between 2 and 100
        characters in length, containing only lowercase alphanumeric
        characters, ``-`` and ``_``
    :type name: string
    :param email: the email address for the new user
    :type email: string
    :param password: the password of the new user, a string of at least 4
        characters
    :type password: string
    :param id: the id of the new user (optional)
    :type id: string
    :param fullname: the full name of the new user (optional)
    :type fullname: string
    :param about: a description of the new user (optional)
    :type about: string
    :param openid: (optional)
    :type openid: string
    :returns: the newly created user
    :rtype: dictionary
    '''
    model = context['model']
    schema = context.get('schema') or ckan.logic.schema.default_user_schema()
    session = context['session']
    _check_access('user_create', context, data_dict)
    data, errors = _validate(data_dict, schema, context)
    if errors:
        session.rollback()
        raise ValidationError(errors)
    user = model_save.user_dict_save(data, context)
    # Flush the session to cause user.id to be initialised, because
    # activity_create() (below) needs it.
    session.flush()
    activity_create_context = {
        'model': model,
        'user': context['user'],
        'defer_commit': True,  # committed with the main transaction below
        'ignore_auth': True,
        'session': session
    }
    activity_dict = {
        'user_id': user.id,
        'object_id': user.id,
        'activity_type': 'new user',
    }
    logic.get_action('activity_create')(activity_create_context,
                                        activity_dict)
    if not context.get('defer_commit'):
        model.repo.commit()
    # A new context is required for dictizing the newly constructed user in
    # order that all the new user's data is returned, in particular, the
    # api_key.
    #
    # The context is copied so as not to clobber the caller's context dict.
    user_dictize_context = context.copy()
    user_dictize_context['keep_apikey'] = True
    user_dictize_context['keep_email'] = True
    user_dict = model_dictize.user_dictize(user, user_dictize_context)
    context['user_obj'] = user
    context['id'] = user.id
    model.Dashboard.get(user.id)  # Create dashboard for user.
    log.debug('Created user {name}'.format(name=user.name))
    return user_dict
def user_invite(context, data_dict):
    '''Invite a new user.

    You must be authorized to create group members.

    :param email: the email of the user to be invited to the group
    :type email: string
    :param group_id: the id or name of the group
    :type group_id: string
    :param role: role of the user in the group. One of ``member``,
        ``editor``, or ``admin``
    :type role: string
    :returns: the newly created user
    :rtype: dictionary
    '''
    _check_access('user_invite', context, data_dict)

    schema = context.get('schema',
                         ckan.logic.schema.default_user_invite_schema())
    data, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)

    # Create a pending user with a generated name and throwaway random
    # password; the invitee sets a real password when accepting the invite.
    data.update({
        'name': _get_random_username_from_email(data['email']),
        'password': str(random.SystemRandom().random()),
        'state': ckan.model.State.PENDING,
    })
    created = _get_action('user_create')(context, data)
    invited_user = ckan.model.User.get(created['id'])

    # Add the new user to the requested group with the requested role.
    _get_action('group_member_create')(context, {
        'username': invited_user.id,
        'id': data['group_id'],
        'role': data['role'],
    })
    mailer.send_invite(invited_user)
    return model_dictize.user_dictize(invited_user, context)
def _get_random_username_from_email(email):
    '''Derive a unique, randomised username from an email address.

    The local part of the address (before the '@') is sanitised to word
    characters and ``-``, then a random numeric suffix is appended until a
    name not already taken by an existing user is found.

    :param email: the email address to derive the name from
    :type email: string
    :returns: a username that does not collide with any existing user, or
        the bare cleaned local part if no unique name could be generated
    :rtype: string
    '''
    localpart = email.split('@')[0]
    cleaned_localpart = re.sub(r'[^\w]', '-', localpart)
    # Create the RNG once instead of per attempt: each SystemRandom()
    # instantiation reads from the OS entropy source.
    rng = random.SystemRandom()
    # if we can't create a unique user name within this many attempts
    # then something else is probably wrong and we should give up
    max_name_creation_attempts = 100
    for i in range(max_name_creation_attempts):
        random_number = rng.random() * 10000
        name = '%s-%d' % (cleaned_localpart, random_number)
        if not ckan.model.User.get(name):
            return name
    return cleaned_localpart
## Modifications for rest api
def package_create_rest(context, data_dict):
    # Legacy REST-API entry point: convert the API-format package dict to the
    # standard dict format, create the dataset, then dictize the saved model
    # object back into the API format for the response.
    _check_access('package_create_rest', context, data_dict)
    dictized_package = model_save.package_api_to_dict(data_dict, context)
    _get_action('package_create')(context, dictized_package)
    created_pkg = context['package']
    data_dict['id'] = created_pkg.id
    return model_dictize.package_to_api(created_pkg, context)
def group_create_rest(context, data_dict):
    # Legacy REST-API entry point: convert the API-format group dict to the
    # standard dict format, create the group, then dictize the saved model
    # object back into the API format for the response.
    _check_access('group_create_rest', context, data_dict)
    dictized_group = model_save.group_api_to_dict(data_dict, context)
    _get_action('group_create')(context, dictized_group)
    created_group = context['group']
    data_dict['id'] = created_group.id
    return model_dictize.group_to_api(created_group, context)
def vocabulary_create(context, data_dict):
    '''Create a new tag vocabulary.

    You must be a sysadmin to create vocabularies.

    :param name: the name of the new vocabulary, e.g. ``'Genre'``
    :type name: string
    :param tags: the new tags to add to the new vocabulary, for the format of
        tag dictionaries see ``tag_create()``
    :type tags: list of tag dictionaries
    :returns: the newly-created vocabulary
    :rtype: dictionary
    '''
    model = context['model']
    schema = (context.get('schema')
              or ckan.logic.schema.default_create_vocabulary_schema())

    _check_access('vocabulary_create', context, data_dict)

    data, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    vocab = model_save.vocabulary_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit()

    log.debug('Created Vocabulary %s' % vocab.name)
    return model_dictize.vocabulary_dictize(vocab, context)
def activity_create(context, activity_dict, **kw):
    '''Create a new activity stream activity.
    You must be a sysadmin to create new activities.
    :param user_id: the name or id of the user who carried out the activity,
        e.g. ``'seanh'``
    :type user_id: string
    :param object_id: the name or id of the object of the activity, e.g.
        ``'my_dataset'``
    :param activity_type: the type of the activity, this must be an activity
        type that CKAN knows how to render, e.g. ``'new package'``,
        ``'changed user'``, ``'deleted group'`` etc. (for a full list see
        ``activity_renderers`` in ``ckan/logic/action/get.py``
    :type activity_type: string
    :param data: any additional data about the activity
    :type data: dictionary
    :returns: the newly created activity
    :rtype: dictionary
    '''
    _check_access('activity_create', context, activity_dict)
    # this action had a ignore_auth param which has been removed
    # removed in 2.2
    # (**kw exists only to detect callers still passing the legacy param)
    if 'ignore_auth' in kw:
        raise Exception('Activity Stream calling parameters have changed '
                        'ignore_auth must be passed in the context not as '
                        'a param')
    # Activity streams can be disabled site-wide via config; in that case
    # this action is a silent no-op and returns None.
    if not paste.deploy.converters.asbool(
            config.get('ckan.activity_streams_enabled', 'true')):
        return
    model = context['model']
    # Any revision_id that the caller attempts to pass in the activity_dict is
    # ignored and overwritten here.
    if getattr(model.Session, 'revision', None):
        activity_dict['revision_id'] = model.Session.revision.id
    else:
        activity_dict['revision_id'] = None
    schema = context.get('schema') or ckan.logic.schema.default_create_activity_schema()
    data, errors = _validate(activity_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    activity = model_save.activity_dict_save(data, context)
    if not context.get('defer_commit'):
        model.repo.commit()
    log.debug("Created '%s' activity" % activity.activity_type)
    return model_dictize.activity_dictize(activity, context)
def package_relationship_create_rest(context, data_dict):
    # Map the legacy REST URL parameter names onto the action-API names.
    key_map = {'id': 'subject',
               'id2': 'object',
               'rel': 'type'}
    # Non-destructive rename so that explicit 'object'/'type' values supplied
    # in the request body take precedence over the URL parameters.
    renamed_dict = ckan.logic.action.rename_keys(data_dict, key_map,
                                                 destructive=False)
    return _get_action('package_relationship_create')(context, renamed_dict)
def tag_create(context, data_dict):
    '''Create a new vocabulary tag.

    You must be a sysadmin to create vocabulary tags.

    You can only use this function to create tags that belong to a vocabulary,
    not to create free tags. (To create a new free tag simply add the tag to
    a package, e.g. using the ``package_update`` function.)

    :param name: the name for the new tag, a string between 2 and 100
        characters long containing only alphanumeric characters and ``-``,
        ``_`` and ``.``, e.g. ``'Jazz'``
    :type name: string
    :param vocabulary_id: the name or id of the vocabulary that the new tag
        should be added to, e.g. ``'Genre'``
    :type vocabulary_id: string

    :returns: the newly-created tag
    :rtype: dictionary

    :raises ckan.logic.ValidationError: if the tag data fails validation
    '''
    model = context['model']

    _check_access('tag_create', context, data_dict)

    schema = context.get('schema') or ckan.logic.schema.default_create_tag_schema()
    data, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)

    # BUG FIX: save the *validated* data, not the raw input.  Previously the
    # unvalidated data_dict was saved, discarding any normalisation or
    # stripping the schema performed.
    tag = model_save.tag_dict_save(data, context)

    if not context.get('defer_commit'):
        model.repo.commit()

    log.debug("Created tag '%s' " % tag)
    return model_dictize.tag_dictize(tag, context)
def follow_user(context, data_dict):
    '''Begin following another user.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the user to follow, e.g. ``'joeuser'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the other user
    :rtype: dictionary
    '''
    if 'user' not in context:
        raise logic.NotAuthorized(_("You must be logged in to follow users"))

    model = context['model']
    session = context['session']

    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(_("You must be logged in to follow users"))

    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_user_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    target_id = validated_data_dict['id']

    # Self-follows are rejected.
    if userobj.id == target_id:
        message = _('You cannot follow yourself')
        raise ValidationError({'message': message}, error_summary=message)

    # Duplicate follows are rejected too.
    if model.UserFollowingUser.is_following(userobj.id, target_id):
        followed = model.User.get(target_id)
        message = _(
            'You are already following {0}').format(followed.display_name)
        raise ValidationError({'message': message}, error_summary=message)

    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingUser)

    if not context.get('defer_commit'):
        model.repo.commit()

    log.debug(u'User {follower} started following user {object}'.format(
        follower=follower.follower_id, object=follower.object_id))

    return model_dictize.user_following_user_dictize(follower, context)
def follow_dataset(context, data_dict):
    '''Start following a dataset.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the dataset to follow, e.g. ``'warandpeace'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the dataset
    :rtype: dictionary

    :raises ckan.logic.NotAuthorized: if no user is supplied in the context
    :raises ckan.logic.ValidationError: if the id fails validation or the
        dataset is already being followed
    '''
    # `dict.has_key()` was removed in Python 3; use the `in` operator, which
    # also matches the style used by follow_user().
    if 'user' not in context:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a dataset."))

    model = context['model']
    session = context['session']

    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a dataset."))

    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_dataset_schema())
    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    # Don't let a user follow a dataset she is already following.
    if model.UserFollowingDataset.is_following(userobj.id,
                                               validated_data_dict['id']):
        # FIXME really package model should have this logic and provide
        # 'display_name' like users and groups
        pkgobj = model.Package.get(validated_data_dict['id'])
        name = pkgobj.title or pkgobj.name or pkgobj.id
        message = _(
            'You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)

    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingDataset)

    if not context.get('defer_commit'):
        model.repo.commit()

    log.debug(u'User {follower} started following dataset {object}'.format(
        follower=follower.follower_id, object=follower.object_id))

    return model_dictize.user_following_dataset_dictize(follower, context)
def _group_or_org_member_create(context, data_dict, is_org=False):
    '''Make a user a member of a group or organization (shared helper).

    :param id: the id or name of the group/organization
    :param username: name or id of the user to add
    :param role: capacity the user will hold in the group

    :raises ckan.logic.ValidationError: if the member data fails validation
        or the user does not exist
    '''
    # creator of group/org becomes an admin
    # this needs to be after the repo.commit or else revisions break
    model = context['model']
    user = context['user']
    session = context['session']

    schema = ckan.logic.schema.member_schema()
    data, errors = _validate(data_dict, schema, context)
    # BUG FIX: the validation result used to be silently discarded; surface
    # validation errors to the caller like the other create actions do.
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    username = _get_or_bust(data_dict, 'username')
    role = data_dict.get('role')
    group_id = data_dict.get('id')
    group = model.Group.get(group_id)

    result = model.User.get(username)
    if result:
        user_id = result.id
    else:
        message = _(u'User {username} does not exist.').format(username=username)
        raise ValidationError({'message': message}, error_summary=message)

    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
        'capacity': role,
    }
    member_create_context = {
        'model': model,
        'user': user,
        'session': session
    }
    # Delegate the actual membership creation to the member_create action.
    logic.get_action('member_create')(member_create_context, member_dict)
def group_member_create(context, data_dict):
    '''Make a user a member of a group.

    You must be authorized to edit the group.

    :param id: the id or name of the group
    :type id: string
    :param username: name or id of the user to be made member of the group
    :type username: string
    :param role: role of the user in the group. One of ``member``, ``editor``,
        or ``admin``
    :type role: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    # Check authorization, then hand over to the shared group/org helper.
    _check_access('group_member_create', context, data_dict)
    return _group_or_org_member_create(context, data_dict, is_org=False)
def organization_member_create(context, data_dict):
    '''Make a user a member of an organization.

    You must be authorized to edit the organization.

    :param id: the id or name of the organization
    :type id: string
    :param username: name or id of the user to be made member of the
        organization
    :type username: string
    :param role: role of the user in the organization. One of ``member``,
        ``editor``, or ``admin``
    :type role: string

    :returns: the newly created (or updated) membership
    :rtype: dictionary
    '''
    # Check authorization, then hand over to the shared group/org helper.
    _check_access('organization_member_create', context, data_dict)
    return _group_or_org_member_create(context, data_dict, is_org=True)
def follow_group(context, data_dict):
    '''Start following a group.

    You must provide your API key in the Authorization header.

    :param id: the id or name of the group to follow, e.g. ``'roger'``
    :type id: string

    :returns: a representation of the 'follower' relationship between yourself
        and the group
    :rtype: dictionary

    :raises ckan.logic.NotAuthorized: if no user is supplied in the context
    :raises ckan.logic.ValidationError: if the id fails validation or the
        group is already being followed
    '''
    if 'user' not in context:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a group."))

    model = context['model']
    session = context['session']

    userobj = model.User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized(
            _("You must be logged in to follow a group."))

    # CONSISTENCY FIX: use the lazy `or` form like follow_user and
    # follow_dataset, so the default schema is only built when the context
    # does not supply one (get(key, default) built it unconditionally).
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_group_schema())

    validated_data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        model.Session.rollback()
        raise ValidationError(errors)

    # Don't let a user follow a group she is already following.
    if model.UserFollowingGroup.is_following(userobj.id,
                                             validated_data_dict['id']):
        groupobj = model.Group.get(validated_data_dict['id'])
        name = groupobj.display_name
        message = _(
            'You are already following {0}').format(name)
        raise ValidationError({'message': message}, error_summary=message)

    follower = model_save.follower_dict_save(validated_data_dict, context,
                                             model.UserFollowingGroup)

    if not context.get('defer_commit'):
        model.repo.commit()

    log.debug(u'User {follower} started following group {object}'.format(
        follower=follower.follower_id, object=follower.object_id))

    return model_dictize.user_following_group_dictize(follower, context)
| {
"repo_name": "WilJoey/tn_ckan",
"path": "ckan/logic/action/create.py",
"copies": "1",
"size": "47148",
"license": "mit",
"hash": -320518789178124800,
"line_mean": 34.3168539326,
"line_max": 111,
"alpha_frac": 0.6464325104,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51464325104,
"avg_score": null,
"num_lines": null
} |
'''API functions for deleting data from CKAN.'''
from sqlalchemy import or_
import ckan.logic
import ckan.logic.action
import ckan.plugins as plugins
import ckan.lib.dictization.model_dictize as model_dictize
from ckan.common import _
validate = ckan.lib.navl.dictization_functions.validate
# Define some shortcuts
# Ensure they are module-private so that they don't get loaded as available
# actions in the action API.
ValidationError = ckan.logic.ValidationError
NotFound = ckan.logic.NotFound
_check_access = ckan.logic.check_access
_get_or_bust = ckan.logic.get_or_bust
_get_action = ckan.logic.get_action
def user_delete(context, data_dict):
    '''Delete a user.

    Only sysadmins can delete users.

    :param id: the id or username of the user to delete
    :type id: string
    '''
    _check_access('user_delete', context, data_dict)

    model = context['model']
    user_id = _get_or_bust(data_dict, 'id')
    user_obj = model.User.get(user_id)

    if user_obj is None:
        raise NotFound('User "{id}" was not found.'.format(id=user_id))

    # State-delete (not a purge): the user row is marked deleted.
    user_obj.delete()
    model.repo.commit()
def package_delete(context, data_dict):
    '''Delete a dataset (package).

    You must be authorized to delete the dataset.

    :param id: the id or name of the dataset to delete
    :type id: string
    '''
    model = context['model']
    user = context['user']

    dataset_id = _get_or_bust(data_dict, 'id')
    entity = model.Package.get(dataset_id)
    if entity is None:
        raise NotFound

    _check_access('package_delete', context, data_dict)

    # Record the deletion as a new revision before changing state.
    rev = model.repo.new_revision()
    rev.author = user
    rev.message = _(u'REST API: Delete Package: %s') % entity.name

    # Let package plugins react to the deletion.
    for item in plugins.PluginImplementations(plugins.IPackageController):
        item.delete(entity)
        item.after_delete(context, data_dict)

    entity.delete()
    model.repo.commit()
def resource_delete(context, data_dict):
    '''Delete a resource from a dataset.

    You must be a sysadmin or the owner of the resource to delete it.

    :param id: the id of the resource
    :type id: string
    '''
    model = context['model']
    id = _get_or_bust(data_dict, 'id')

    entity = model.Resource.get(id)
    if entity is None:
        raise NotFound

    _check_access('resource_delete', context, data_dict)

    # A resource is removed by updating its package with the resource
    # filtered out of the package's resource list.
    package_id = entity.get_package_id()
    pkg_dict = _get_action('package_show')(context, {'id': package_id})

    if pkg_dict.get('resources'):
        pkg_dict['resources'] = [r for r in pkg_dict['resources'] if not
                r['id'] == id]
    try:
        pkg_dict = _get_action('package_update')(context, pkg_dict)
    # SYNTAX FIX: `except X, e` is Python 2 only; `except X as e` works on
    # Python 2.6+ and Python 3.
    except ValidationError as e:
        # Re-raise only the errors for the last (i.e. failing) resource.
        errors = e.error_dict['resources'][-1]
        raise ValidationError(errors)

    model.repo.commit()
def package_relationship_delete(context, data_dict):
    '''Delete a dataset (package) relationship.

    You must be authorised to delete dataset relationships, and to edit both
    the subject and the object datasets.

    :param subject: the id or name of the dataset that is the subject of the
        relationship
    :type subject: string
    :param object: the id or name of the dataset that is the object of the
        relationship
    :type object: string
    :param type: the type of the relationship
    :type type: string

    :raises ckan.logic.NotFound: if either dataset or the relationship
        itself does not exist
    '''
    model = context['model']
    user = context['user']
    id, id2, rel = _get_or_bust(data_dict, ['subject', 'object', 'type'])

    pkg1 = model.Package.get(id)
    pkg2 = model.Package.get(id2)
    if not pkg1:
        raise NotFound('Subject package %r was not found.' % id)
    if not pkg2:
        # BUG FIX: this used to `return` the NotFound instance instead of
        # raising it, so a missing object package went unreported.
        raise NotFound('Object package %r was not found.' % id2)

    existing_rels = pkg1.get_relationships_with(pkg2, rel)
    if not existing_rels:
        raise NotFound

    relationship = existing_rels[0]
    revisioned_details = 'Package Relationship: %s %s %s' % (id, rel, id2)

    context['relationship'] = relationship
    _check_access('package_relationship_delete', context, data_dict)

    # Record the deletion as a new revision.
    rev = model.repo.new_revision()
    rev.author = user
    rev.message = _(u'REST API: Delete %s') % revisioned_details

    relationship.delete()
    model.repo.commit()
def related_delete(context, data_dict):
    '''Delete a related item from a dataset.

    You must be a sysadmin or the owner of the related item to delete it.

    :param id: the id of the related item
    :type id: string
    '''
    model = context['model']
    session = context['session']
    user = context['user']
    userobj = model.User.get(user)

    id = _get_or_bust(data_dict, 'id')
    entity = model.Related.get(id)
    if entity is None:
        raise NotFound

    _check_access('related_delete', context, data_dict)

    # Record a 'deleted related item' activity; its commit is deferred so it
    # lands in the same transaction as the deletion below.
    related_dict = model_dictize.related_dictize(entity, context)
    activity_dict = {
        'user_id': userobj.id,
        'object_id': entity.id,
        'activity_type': 'deleted related item',
        'data': {'related': related_dict},
    }
    activity_create_context = {
        'model': model,
        'user': user,
        'defer_commit': True,
        'ignore_auth': True,
        'session': session
    }
    _get_action('activity_create')(activity_create_context, activity_dict)
    session.commit()

    entity.delete()
    model.repo.commit()
def member_delete(context, data_dict=None):
    '''Remove an object (e.g. a user, dataset or group) from a group.

    You must be authorized to edit a group to remove objects from it.

    :param id: the id of the group
    :type id: string
    :param object: the id or name of the object to be removed
    :type object: string
    :param object_type: the type of the object to be removed, e.g. ``package``
        or ``user``
    :type object_type: string
    '''
    model = context['model']

    group_id, obj_id, obj_type = _get_or_bust(
        data_dict, ['id', 'object', 'object_type'])

    group = model.Group.get(group_id)
    if not group:
        raise NotFound('Group was not found.')

    obj_class = ckan.logic.model_name_to_class(model, obj_type)
    obj = obj_class.get(obj_id)
    if not obj:
        raise NotFound('%s was not found.' % obj_type.title())

    _check_access('member_delete', context, data_dict)

    # Only an *active* membership is deleted; absent or already-deleted
    # memberships are silently ignored.
    member = model.Session.query(model.Member).\
        filter(model.Member.table_name == obj_type).\
        filter(model.Member.table_id == obj.id).\
        filter(model.Member.group_id == group.id).\
        filter(model.Member.state == 'active').first()
    if member:
        rev = model.repo.new_revision()
        rev.author = context.get('user')
        rev.message = _(u'REST API: Delete Member: %s') % obj_id
        member.delete()
        model.repo.commit()
def _group_or_org_delete(context, data_dict, is_org=False):
    '''Delete a group or organization (shared implementation).

    You must be authorized to delete the group.

    :param id: the name or id of the group
    :type id: string
    :param is_org: True when deleting an organization, False for a group;
        selects the auth function and plugin interface used
    :type is_org: boolean
    '''
    model = context['model']
    user = context['user']
    id = _get_or_bust(data_dict, 'id')
    group = model.Group.get(id)
    context['group'] = group
    if group is None:
        raise NotFound('Group was not found.')
    revisioned_details = 'Group: %s' % group.name
    if is_org:
        _check_access('organization_delete', context, data_dict)
    else:
        _check_access('group_delete', context, data_dict)
    # organization delete will delete all datasets for that org
    # FIXME this gets all the packages the user can see which generally will
    # be all but this is only a fluke so we should fix this properly
    if is_org:
        for pkg in group.packages(with_private=True):
            _get_action('package_delete')(context, {'id': pkg.id})
    # Record the deletion as a new revision.
    rev = model.repo.new_revision()
    rev.author = user
    rev.message = _(u'REST API: Delete %s') % revisioned_details
    # The group's Member objects are deleted
    # (including hierarchy connections to parent and children groups)
    for member in model.Session.query(model.Member).\
            filter(or_(model.Member.table_id == id,
                       model.Member.group_id == id)).\
            filter(model.Member.state == 'active').all():
        member.delete()
    group.delete()
    # Notify plugins through the matching controller interface.
    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController
    for item in plugins.PluginImplementations(plugin_type):
        item.delete(group)
    model.repo.commit()
def group_delete(context, data_dict):
    '''Delete a group.

    You must be authorized to delete the group.

    :param id: the name or id of the group
    :type id: string
    '''
    # Thin wrapper over the shared group/organization implementation.
    return _group_or_org_delete(context, data_dict, is_org=False)
def organization_delete(context, data_dict):
    '''Delete an organization.

    You must be authorized to delete the organization.

    :param id: the name or id of the organization
    :type id: string
    '''
    # Thin wrapper over the shared group/organization implementation.
    return _group_or_org_delete(context, data_dict, is_org=True)
def _group_or_org_purge(context, data_dict, is_org=False):
    '''Purge a group or organization.

    The group or organization will be completely removed from the database.
    This cannot be undone!

    Only sysadmins can purge groups or organizations.

    :param id: the name or id of the group or organization to be purged
    :type id: string
    :param is_org: you should pass is_org=True if purging an organization,
        otherwise False (optional, default: False)
    :type is_org: boolean
    '''
    model = context['model']
    id = _get_or_bust(data_dict, 'id')
    group = model.Group.get(id)
    context['group'] = group
    if group is None:
        if is_org:
            raise NotFound('Organization was not found')
        else:
            raise NotFound('Group was not found')
    if is_org:
        _check_access('organization_purge', context, data_dict)
    else:
        _check_access('group_purge', context, data_dict)
    # First remove all Member rows referencing this group, in their own
    # revision and commit, so the group row has no remaining references.
    members = model.Session.query(model.Member)
    members = members.filter(model.Member.group_id == group.id)
    if members.count() > 0:
        model.repo.new_revision()
        for m in members.all():
            m.delete()
        model.repo.commit_and_remove()
    # commit_and_remove() closed the session, so the group object must be
    # re-fetched before it can be purged in a second revision.
    group = model.Group.get(id)
    model.repo.new_revision()
    group.purge()
    model.repo.commit_and_remove()
def group_purge(context, data_dict):
    '''Purge a group.

    .. warning:: Purging a group cannot be undone!

    Purging a group completely removes the group from the CKAN database,
    whereas deleting a group simply marks the group as deleted (it will no
    longer show up in the frontend, but is still in the db).

    You must be authorized to purge the group.

    :param id: the name or id of the group to be purged
    :type id: string
    '''
    # Thin wrapper over the shared purge implementation.
    return _group_or_org_purge(context, data_dict, is_org=False)
def organization_purge(context, data_dict):
    '''Purge an organization.

    .. warning:: Purging an organization cannot be undone!

    Purging an organization completely removes the organization from the CKAN
    database, whereas deleting an organization simply marks the organization as
    deleted (it will no longer show up in the frontend, but is still in the
    db).

    You must be authorized to purge the organization.

    :param id: the name or id of the organization to be purged
    :type id: string
    '''
    # Thin wrapper over the shared purge implementation.
    return _group_or_org_purge(context, data_dict, is_org=True)
def task_status_delete(context, data_dict):
    '''Delete a task status.

    You must be a sysadmin to delete task statuses.

    :param id: the id of the task status to delete
    :type id: string
    '''
    model = context['model']
    task_id = _get_or_bust(data_dict, 'id')

    entity = model.TaskStatus.get(task_id)
    if entity is None:
        raise NotFound

    _check_access('task_status_delete', context, data_dict)

    entity.delete()
    # Task statuses are not revisioned, so the session is committed directly
    # rather than through model.repo.
    model.Session.commit()
def vocabulary_delete(context, data_dict):
    '''Delete a tag vocabulary.

    You must be a sysadmin to delete vocabularies.

    :param id: the id of the vocabulary
    :type id: string
    '''
    model = context['model']

    vocab_id = data_dict.get('id')
    if not vocab_id:
        raise ValidationError({'id': _('id not in data')})

    vocab_obj = model.vocabulary.Vocabulary.get(vocab_id)
    if vocab_obj is None:
        raise NotFound(_('Could not find vocabulary "%s"') % vocab_id)

    _check_access('vocabulary_delete', context, data_dict)

    vocab_obj.delete()
    model.repo.commit()
def tag_delete(context, data_dict):
    '''Delete a tag.

    You must be a sysadmin to delete tags.

    :param id: the id or name of the tag
    :type id: string
    :param vocabulary_id: the id or name of the vocabulary that the tag belongs
        to (optional, default: None)
    :type vocabulary_id: string
    '''
    model = context['model']

    # COMPAT FIX: `dict.has_key()` was removed in Python 3.  `get` covers
    # both the missing-key and empty-value cases in a single test.
    if not data_dict.get('id'):
        raise ValidationError({'id': _('id not in data')})
    tag_id_or_name = _get_or_bust(data_dict, 'id')

    vocab_id_or_name = data_dict.get('vocabulary_id')

    tag_obj = model.tag.Tag.get(tag_id_or_name, vocab_id_or_name)
    if tag_obj is None:
        raise NotFound(_('Could not find tag "%s"') % tag_id_or_name)

    _check_access('tag_delete', context, data_dict)

    tag_obj.delete()
    model.repo.commit()
def package_relationship_delete_rest(context, data_dict):
    """Delete a package relationship via the legacy REST API.

    Translates the REST-style keys (``id``, ``id2``, ``rel``) into the names
    expected by ``package_relationship_delete`` and delegates to it.
    """
    # The rename is destructive: the subject/object/type taken from the URI
    # must overwrite any values supplied in the request params, because
    # callers are not allowed to change these values.
    rest_to_action = {'id': 'subject', 'id2': 'object', 'rel': 'type'}
    renamed = ckan.logic.action.rename_keys(data_dict, rest_to_action,
                                            destructive=True)
    package_relationship_delete(context, renamed)
def _unfollow(context, data_dict, schema, FollowerClass):
    '''Stop following an object (shared implementation).

    :param schema: the validation schema for data_dict
    :param FollowerClass: the follower model class to delete from, e.g.
        ``model.UserFollowingUser``

    :raises ckan.logic.NotAuthorized: if no valid user is in the context
    :raises ckan.logic.ValidationError: if data_dict fails validation
    :raises ckan.logic.NotFound: if the user is not following the object
    '''
    model = context['model']

    # COMPAT FIX: `dict.has_key()` was removed in Python 3; use `in`.
    if 'user' not in context:
        raise ckan.logic.NotAuthorized(
            _("You must be logged in to unfollow something."))
    userobj = model.User.get(context['user'])
    if not userobj:
        raise ckan.logic.NotAuthorized(
            _("You must be logged in to unfollow something."))
    follower_id = userobj.id

    validated_data_dict, errors = validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    object_id = validated_data_dict.get('id')

    follower_obj = FollowerClass.get(follower_id, object_id)
    if follower_obj is None:
        raise NotFound(
            _('You are not following {0}.').format(data_dict.get('id')))

    follower_obj.delete()
    model.repo.commit()
def unfollow_user(context, data_dict):
    '''Stop following a user.

    :param id: the id or name of the user to stop following
    :type id: string
    '''
    # Fall back to the default schema when none is supplied in the context.
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_user_schema())
    _unfollow(context, data_dict, schema, context['model'].UserFollowingUser)
def unfollow_dataset(context, data_dict):
    '''Stop following a dataset.

    :param id: the id or name of the dataset to stop following
    :type id: string
    '''
    # Fall back to the default schema when none is supplied in the context.
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_dataset_schema())
    _unfollow(context, data_dict, schema,
              context['model'].UserFollowingDataset)
def _group_or_org_member_delete(context, data_dict=None):
    '''Remove a user from a group or organization (shared implementation).'''
    model = context['model']
    user = context['user']
    session = context['session']

    group = model.Group.get(data_dict.get('id'))

    # Accept either 'username' or 'user_id' for the user to remove;
    # 'username' wins when both are present.
    user_id = data_dict.get('username')
    if user_id is None:
        user_id = data_dict.get('user_id')

    member_dict = {
        'id': group.id,
        'object': user_id,
        'object_type': 'user',
    }
    member_context = {
        'model': model,
        'user': user,
        'session': session
    }
    _get_action('member_delete')(member_context, member_dict)
def group_member_delete(context, data_dict=None):
    '''Remove a user from a group.

    You must be authorized to edit the group.

    :param id: the id or name of the group
    :type id: string
    :param username: name or id of the user to be removed
    :type username: string
    '''
    # Check authorization, then hand over to the shared group/org helper.
    _check_access('group_member_delete', context, data_dict)
    return _group_or_org_member_delete(context, data_dict)
def organization_member_delete(context, data_dict=None):
    '''Remove a user from an organization.

    You must be authorized to edit the organization.

    :param id: the id or name of the organization
    :type id: string
    :param username: name or id of the user to be removed
    :type username: string
    '''
    # Check authorization, then hand over to the shared group/org helper.
    _check_access('organization_member_delete', context, data_dict)
    return _group_or_org_member_delete(context, data_dict)
def unfollow_group(context, data_dict):
    '''Stop following a group.

    :param id: the id or name of the group to stop following
    :type id: string
    '''
    # CONSISTENCY FIX: use the lazy `or` form like unfollow_user and
    # unfollow_dataset, so the default schema is only built when the context
    # does not supply one (get(key, default) built it unconditionally).
    schema = (context.get('schema')
              or ckan.logic.schema.default_follow_group_schema())
    _unfollow(context, data_dict, schema,
              context['model'].UserFollowingGroup)
| {
"repo_name": "WilJoey/tn_ckan",
"path": "ckan/logic/action/delete.py",
"copies": "1",
"size": "17234",
"license": "mit",
"hash": -5513542143168684000,
"line_mean": 27.5804311774,
"line_max": 89,
"alpha_frac": 0.6423349193,
"autogenerated": false,
"ratio": 3.6959039245121166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48382388438121166,
"avg_score": null,
"num_lines": null
} |
'''API functions for searching for and getting data from CKAN.'''
import uuid
import logging
import json
import datetime
import socket
from pylons import config
import sqlalchemy
import ckan.lib.dictization
import ckan.logic as logic
import ckan.logic.action
import ckan.logic.schema
import ckan.lib.dictization.model_dictize as model_dictize
import ckan.lib.navl.dictization_functions
import ckan.model as model
import ckan.model.misc as misc
import ckan.plugins as plugins
import ckan.lib.search as search
import ckan.lib.plugins as lib_plugins
import ckan.lib.activity_streams as activity_streams
import ckan.new_authz as new_authz
import ckan.lib.munge as munge
import ckan.lib.helpers as h
from ckan.common import _
log = logging.getLogger('ckan.logic')
# Define some shortcuts
# Ensure they are module-private so that they don't get loaded as available
# actions in the action API.
_validate = ckan.lib.navl.dictization_functions.validate
_table_dictize = ckan.lib.dictization.table_dictize
_check_access = logic.check_access
NotFound = logic.NotFound
ValidationError = logic.ValidationError
_get_or_bust = logic.get_or_bust
_select = sqlalchemy.sql.select
_aliased = sqlalchemy.orm.aliased
_or_ = sqlalchemy.or_
_and_ = sqlalchemy.and_
_func = sqlalchemy.func
_desc = sqlalchemy.desc
_case = sqlalchemy.case
_text = sqlalchemy.text
def _package_list_with_resources(context, package_revision_list):
    """Dictize each package revision in the given list.

    :returns: a list of package dicts (including their resources)
    """
    return [model_dictize.package_dictize(revision, context)
            for revision in package_revision_list]
def site_read(context, data_dict=None):
    '''Return ``True``.

    :rtype: boolean
    '''
    # Auth-only action: raises NotAuthorized when the user may not read the
    # site; otherwise there is nothing to compute.
    _check_access('site_read', context, data_dict)
    return True
@logic.validate(logic.schema.default_pagination_schema)
def package_list(context, data_dict):
    '''Return a list of the names of the site's datasets (packages).

    :param limit: if given, the list of datasets will be broken into pages of
        at most ``limit`` datasets per page and only one page will be returned
        at a time (optional)
    :type limit: int
    :param offset: when ``limit`` is given, the offset to start returning
        packages from
    :type offset: int

    :rtype: list of strings
    '''
    model = context["model"]
    api = context.get("api_version", 1)

    _check_access('package_list', context, data_dict)

    package_revision_table = model.package_revision_table
    # API v2 lists ids; API v1 lists names.
    col = (package_revision_table.c.id
           if api == 2 else package_revision_table.c.name)

    # Only current, active, non-private package revisions are listed.
    query = _select([col]).where(_and_(
        package_revision_table.c.state == 'active',
        package_revision_table.c.current == True,
        package_revision_table.c.private == False,
    )).order_by(col)

    limit = data_dict.get('limit')
    if limit:
        query = query.limit(limit)
    offset = data_dict.get('offset')
    if offset:
        query = query.offset(offset)

    # Each result row has a single column; return just that value.
    return [row[0] for row in query.execute()]
@logic.validate(logic.schema.default_package_list_schema)
def current_package_list_with_resources(context, data_dict):
    '''Return a list of the site's datasets (packages) and their resources.

    The list is sorted most-recently-modified first.

    :param limit: if given, the list of datasets will be broken into pages of
        at most ``limit`` datasets per page and only one page will be returned
        at a time (optional)
    :type limit: int
    :param offset: when ``limit`` is given, the offset to start returning
        packages from
    :type offset: int
    :param page: when ``limit`` is given, which page to return. Deprecated:
        use ``offset`` instead
    :type page: int

    :rtype: list of dictionaries
    '''
    model = context["model"]
    limit = data_dict.get('limit')
    offset = data_dict.get('offset', 0)
    # Backwards compatibility: translate the deprecated 'page' parameter into
    # an offset.  An explicit 'offset' always wins over 'page'.
    if not 'offset' in data_dict and 'page' in data_dict:
        log.warning('"page" parameter is deprecated.  '
                    'Use the "offset" parameter instead')
        page = data_dict['page']
        if limit:
            offset = (page - 1) * limit
        else:
            offset = 0
    _check_access('current_package_list_with_resources', context, data_dict)
    # Only current, active package revisions, newest first.
    query = model.Session.query(model.PackageRevision)
    query = query.filter(model.PackageRevision.state=='active')
    query = query.filter(model.PackageRevision.current==True)
    query = query.order_by(model.package_revision_table.c.revision_timestamp.desc())
    if limit is not None:
        query = query.limit(limit)
    query = query.offset(offset)
    pack_rev = query.all()
    return _package_list_with_resources(context, pack_rev)
def revision_list(context, data_dict):
    '''Return a list of the IDs of the site's revisions.

    :rtype: list of strings
    '''
    model = context['model']

    _check_access('revision_list', context, data_dict)

    # Every revision in the database, in no particular order.
    return [revision.id
            for revision in model.Session.query(model.Revision).all()]
def package_revision_list(context, data_dict):
    '''Return a dataset (package)'s revisions as a list of dictionaries.

    :param id: the id or name of the dataset
    :type id: string
    '''
    model = context["model"]
    id = _get_or_bust(data_dict, "id")
    pkg = model.Package.get(id)
    if pkg is None:
        raise NotFound

    _check_access('package_revision_list', context, data_dict)

    # all_related_revisions yields (revision, object_revisions) pairs; only
    # the revision itself is dictized here.
    return [model.revision_as_dict(revision, include_packages=False,
                                   include_groups=False)
            for revision, object_revisions in pkg.all_related_revisions]
def related_show(context, data_dict=None):
    '''Return a single related item.

    :param id: the id of the related item to show
    :type id: string

    :rtype: dictionary
    '''
    model = context['model']
    id = _get_or_bust(data_dict, 'id')

    related = model.Related.get(id)
    context['related'] = related
    if related is None:
        raise NotFound

    _check_access('related_show', context, data_dict)

    # Dictize, then run the result through the schema so the returned dict
    # has the expected shape.
    schema = context.get('schema') or ckan.logic.schema.default_related_schema()
    related_dict = model_dictize.related_dictize(related, context)
    related_dict, errors = _validate(related_dict, schema, context=context)

    return related_dict
def related_list(context, data_dict=None):
    '''Return a dataset's related items.

    Either ``id`` or ``dataset`` should identify a dataset; when neither
    does, *all* related items site-wide are listed (with the optional
    filtering/sorting parameters below applied).

    :param id: id or name of the dataset (optional)
    :type id: string
    :param dataset: dataset dictionary of the dataset (optional)
    :type dataset: dictionary
    :param type_filter: the type of related item to show (optional,
        default: None, show all items)
    :type type_filter: string
    :param sort: the order to sort the related items in, possible values are
        'view_count_asc', 'view_count_desc', 'created_asc' or 'created_desc'
        (optional)
    :type sort: string
    :param featured: whether or not to restrict the results to only featured
        related items (optional, default: False)
    :type featured: bool

    :rtype: list of dictionaries
    '''
    model = context['model']
    dataset = data_dict.get('dataset', None)
    if not dataset:
        dataset = model.Package.get(data_dict.get('id'))
    _check_access('related_show',context, data_dict)
    related_list = []
    if not dataset:
        # No dataset resolved: build a site-wide query with optional type
        # filter, sort order and featured flag applied at the query level.
        related_list = model.Session.query(model.Related)
        filter_on_type = data_dict.get('type_filter', None)
        if filter_on_type:
            related_list = related_list.filter(model.Related.type == filter_on_type)
        sort = data_dict.get('sort', None)
        if sort:
            sortables = {
                'view_count_asc' : model.Related.view_count.asc,
                'view_count_desc': model.Related.view_count.desc,
                'created_asc' : model.Related.created.asc,
                'created_desc': model.Related.created.desc,
            }
            # Unknown sort values are silently ignored.
            s = sortables.get(sort, None)
            if s:
                related_list = related_list.order_by( s() )
        if data_dict.get('featured', False):
            related_list = related_list.filter(model.Related.featured == 1)
        related_items = related_list.all()
        # Tell the dictizer the ordering has already been applied.
        context['sorted'] = True
    else:
        # Dataset resolved: list only its active related items.
        relateds = model.Related.get_for_dataset(dataset, status='active')
        related_items = (r.related for r in relateds)
    related_list = model_dictize.related_list_dictize( related_items, context)
    return related_list
def member_list(context, data_dict=None):
    '''Return the members of a group.

    The user must have permission to 'get' the group.

    :param id: the id or name of the group
    :type id: string
    :param object_type: restrict the members returned to those of a given type,
      e.g. ``'user'`` or ``'package'`` (optional, default: ``None``)
    :type object_type: string
    :param capacity: restrict the members returned to those with a given
      capacity, e.g. ``'member'``, ``'editor'``, ``'admin'``, ``'public'``,
      ``'private'`` (optional, default: ``None``)
    :type capacity: string

    :rtype: list of (id, type, capacity) tuples

    :raises: :class:`ckan.logic.NotFound`: if the group doesn't exist
    '''
    model = context['model']

    group = model.Group.get(_get_or_bust(data_dict, 'id'))
    if not group:
        raise NotFound

    obj_type = data_dict.get('object_type', None)
    capacity = data_dict.get('capacity', None)

    # User must be able to update the group to remove a member from it
    _check_access('group_show', context, data_dict)

    # Only active memberships are listed; the optional filters narrow by
    # the member's type and capacity.
    q = model.Session.query(model.Member).\
        filter(model.Member.group_id == group.id).\
        filter(model.Member.state == "active")

    if obj_type:
        q = q.filter(model.Member.table_name == obj_type)
    if capacity:
        q = q.filter(model.Member.capacity == capacity)

    trans = new_authz.roles_trans()

    def translated_capacity(capacity):
        # Translate the capacity label where a translation exists,
        # otherwise return it unchanged.
        try:
            return trans[capacity]
        except KeyError:
            return capacity

    return [(m.table_id, m.table_name, translated_capacity(m.capacity))
            for m in q.all()]
def _group_or_org_list(context, data_dict, is_org=False):
    # Shared implementation behind group_list() and organization_list();
    # `is_org` selects which kind of Group rows are returned.
    model = context['model']
    user = context['user']
    api = context.get('api_version')
    groups = data_dict.get('groups')
    # API v2 refers to groups by id, earlier versions by name.
    ref_group_by = 'id' if api == 2 else 'name'

    sort = data_dict.get('sort', 'name')
    q = data_dict.get('q')

    # order_by deprecated in ckan 1.8
    # if it is supplied and sort isn't use order_by and raise a warning
    order_by = data_dict.get('order_by', '')
    if order_by:
        log.warn('`order_by` deprecated please use `sort`')
        if not data_dict.get('sort'):
            sort = order_by
    # if the sort is packages and no sort direction is supplied we want to do a
    # reverse sort to maintain compatibility.
    if sort.strip() == 'packages':
        sort = 'packages desc'

    # Parse "field direction" and restrict to the two sortable fields.
    sort_info = _unpick_search(sort,
                               allowed_fields=['name', 'packages'],
                               total=1)

    all_fields = data_dict.get('all_fields', None)

    # Only active, current group revisions are considered.
    query = model.Session.query(model.Group).join(model.GroupRevision)
    query = query.filter(model.GroupRevision.state=='active')
    query = query.filter(model.GroupRevision.current==True)
    if groups:
        query = query.filter(model.GroupRevision.name.in_(groups))
    if q:
        # Free-text, case-insensitive match on name, title or description.
        q = u'%{0}%'.format(q)
        query = query.filter(_or_(
            model.GroupRevision.name.ilike(q),
            model.GroupRevision.title.ilike(q),
            model.GroupRevision.description.ilike(q),
        ))

    query = query.filter(model.GroupRevision.is_organization==is_org)

    groups = query.all()
    # Sorting happens in the dictize layer using the parsed field/direction.
    group_list = model_dictize.group_list_dictize(groups, context,
                                                  lambda x:x[sort_info[0][0]],
                                                  sort_info[0][1] == 'desc')

    if not all_fields:
        # Collapse full dicts down to just the id or name.
        group_list = [group[ref_group_by] for group in group_list]

    return group_list
def group_list(context, data_dict):
    '''List the names of all of the site's groups.

    :param order_by: deprecated in favour of ``sort``; the field to order
        the list by, either ``'name'`` or ``'packages'`` (optional,
        default: ``'name'``)
    :type order_by: string
    :param sort: field name plus sort order, e.g. ``"name asc"``; the
        allowed fields are ``'name'`` and ``'packages'`` (optional,
        default: ``"name asc"``)
    :type sort: string
    :param groups: restrict the result to groups with these names (optional)
    :type groups: list of strings
    :param all_fields: return whole group dictionaries rather than only
        their names (optional, default: ``False``)
    :type all_fields: boolean

    :rtype: list of strings
    '''
    _check_access('group_list', context, data_dict)
    data_dict['type'] = 'group'
    # Delegate to the shared group/organization listing helper.
    return _group_or_org_list(context, data_dict, is_org=False)
def organization_list(context, data_dict):
    '''List the names of all of the site's organizations.

    :param order_by: deprecated in favour of ``sort``; the field to order
        the list by, either ``'name'`` or ``'packages'`` (optional,
        default: ``'name'``)
    :type order_by: string
    :param sort: field name plus sort order, e.g. ``"name asc"``; the
        allowed fields are ``'name'`` and ``'packages'`` (optional)
    :type sort: string
    :param organizations: restrict the result to organizations with these
        names (optional)
    :type organizations: list of strings
    :param all_fields: return whole organization dictionaries rather than
        only their names (optional, default: ``False``)
    :type all_fields: boolean

    :rtype: list of strings
    '''
    _check_access('organization_list', context, data_dict)
    # The shared helper expects the name filter under 'groups'.
    org_names = data_dict.pop('organizations', [])
    data_dict['groups'] = org_names
    data_dict['type'] = 'organization'
    return _group_or_org_list(context, data_dict, is_org=True)
def group_list_authz(context, data_dict):
    '''Return the list of groups that the user is authorized to edit.

    :param available_only: remove the existing groups in the package
      (optional, default: ``False``)
    :type available_only: boolean
    :param am_member: if True return only the groups the logged-in user is a
      member of, otherwise return all groups that the user is authorized to
      edit (for example, sysadmin users are authorized to edit all groups)
      (optional, default: False)
    :type am-member: boolean

    :returns: list of dictized groups that the user is authorized to edit
    :rtype: list of dicts
    '''
    model = context['model']
    user = context['user']
    available_only = data_dict.get('available_only', False)
    am_member = data_dict.get('am_member', False)

    _check_access('group_list_authz',context, data_dict)

    sysadmin = new_authz.is_sysadmin(user)
    # Capacities (roles) that grant the 'manage_group' permission count
    # as "can edit" here.
    roles = ckan.new_authz.get_roles_with_permission('manage_group')

    if not roles:
        return []
    user_id = new_authz.get_user_id_for_username(user, allow_none=True)
    if not user_id:
        return []

    if not sysadmin or am_member:
        # Collect ids of groups where the user holds one of those roles.
        q = model.Session.query(model.Member) \
            .filter(model.Member.table_name == 'user') \
            .filter(model.Member.capacity.in_(roles)) \
            .filter(model.Member.table_id == user_id)
        group_ids = []
        for row in q.all():
            group_ids.append(row.group_id)

        if not group_ids:
            return []

    # Active, non-organization groups only.
    q = model.Session.query(model.Group) \
        .filter(model.Group.is_organization == False) \
        .filter(model.Group.state == 'active')

    if not sysadmin or am_member:
        q = q.filter(model.Group.id.in_(group_ids))

    groups = q.all()

    if available_only:
        # Exclude groups the context package already belongs to.
        package = context.get('package')
        if package:
            groups = set(groups) - set(package.get_groups())

    group_list = model_dictize.group_list_dictize(groups, context)
    return group_list
def organization_list_for_user(context, data_dict):
    '''Return the list of organizations that the user is a member of.

    :param permission: the permission the user has against the returned organizations
      (optional, default: ``edit_group``)
    :type permission: string

    :returns: list of dictized organizations that the user is authorized to edit
    :rtype: list of dicts
    '''
    model = context['model']
    user = context['user']

    _check_access('organization_list_for_user',context, data_dict)
    sysadmin = new_authz.is_sysadmin(user)

    # Active organizations only; sysadmins get all of them.
    orgs_q = model.Session.query(model.Group) \
        .filter(model.Group.is_organization == True) \
        .filter(model.Group.state == 'active')

    if not sysadmin:
        # for non-Sysadmins check they have the required permission
        permission = data_dict.get('permission', 'edit_group')

        # Capacities (roles) that grant the requested permission.
        roles = ckan.new_authz.get_roles_with_permission(permission)

        if not roles:
            return []
        user_id = new_authz.get_user_id_for_username(user, allow_none=True)
        if not user_id:
            return []

        # Ids of groups where the user holds one of those roles.
        q = model.Session.query(model.Member) \
            .filter(model.Member.table_name == 'user') \
            .filter(model.Member.capacity.in_(roles)) \
            .filter(model.Member.table_id == user_id)

        group_ids = []
        for row in q.all():
            group_ids.append(row.group_id)

        if not group_ids:
            return []

        orgs_q = orgs_q.filter(model.Group.id.in_(group_ids))

    orgs_list = model_dictize.group_list_dictize(orgs_q.all(), context)
    return orgs_list
def _group_or_org_revision_list(context, data_dict):
    '''Return a group's revisions.

    :param id: the name or id of the group
    :type id: string

    :rtype: list of dictionaries
    '''
    model = context['model']
    group = model.Group.get(_get_or_bust(data_dict, 'id'))
    if group is None:
        raise NotFound

    # Dictize each revision without expanding its packages or groups.
    return [model.revision_as_dict(revision,
                                   include_packages=False,
                                   include_groups=False)
            for revision, _object_revisions in group.all_related_revisions]
def group_revision_list(context, data_dict):
    '''Return the revisions of a group.

    :param id: the name or id of the group
    :type id: string

    :rtype: list of dictionaries
    '''
    _check_access('group_revision_list', context, data_dict)
    return _group_or_org_revision_list(context, data_dict)
def organization_revision_list(context, data_dict):
    '''Return the revisions of an organization.

    :param id: the name or id of the organization
    :type id: string

    :rtype: list of dictionaries
    '''
    _check_access('organization_revision_list', context, data_dict)
    return _group_or_org_revision_list(context, data_dict)
def license_list(context, data_dict):
    '''Return all licenses available for datasets on this site.

    :rtype: list of dictionaries
    '''
    model = context["model"]
    _check_access('license_list', context, data_dict)
    register = model.Package.get_license_register()
    return [lic.as_dict() for lic in register.values()]
def tag_list(context, data_dict):
    '''Return a list of the site's tags.

    By default only free tags (tags that don't belong to a vocabulary) are
    returned. If the ``vocabulary_id`` argument is given then only tags
    belonging to that vocabulary will be returned instead.

    :param query: a tag name query to search for, if given only tags whose
      names contain this string will be returned (optional)
    :type query: string
    :param vocabulary_id: the id or name of a vocabulary, if give only tags
      that belong to this vocabulary will be returned (optional)
    :type vocabulary_id: string
    :param all_fields: return full tag dictionaries instead of just names
      (optional, default: ``False``)
    :type all_fields: boolean

    :rtype: list of dictionaries
    '''
    model = context['model']

    vocab_id_or_name = data_dict.get('vocabulary_id')
    # 'q' is accepted as a short alias for 'query'.
    query = data_dict.get('query') or data_dict.get('q')
    if query:
        query = query.strip()
    all_fields = data_dict.get('all_fields', None)

    _check_access('tag_list', context, data_dict)

    if query:
        # Delegate to the tag search (it reads its own arguments, including
        # any vocabulary restriction, from data_dict); count is discarded.
        tags, count = _tag_search(context, data_dict)
    else:
        tags = model.Tag.all(vocab_id_or_name)

    if tags:
        if all_fields:
            tag_list = model_dictize.tag_list_dictize(tags, context)
        else:
            tag_list = [tag.name for tag in tags]
    else:
        tag_list = []

    return tag_list
def user_list(context, data_dict):
    '''Return a list of the site's user accounts.

    :param q: restrict the users returned to those whose names contain a string
      (optional)
    :type q: string
    :param order_by: which field to sort the list by (optional, default:
      ``'name'``)
    :type order_by: string

    :rtype: list of dictionaries
    '''
    model = context['model']

    _check_access('user_list', context, data_dict)

    q = data_dict.get('q', '')
    order_by = data_dict.get('order_by', 'name')

    query = model.Session.query(
        model.User,
        model.User.name.label('name'),
        model.User.fullname.label('fullname'),
        model.User.about.label('about'),
        # FIX: this column previously selected `about` again but labelled it
        # 'email'; select the actual email column instead.
        model.User.email.label('email'),
        model.User.created.label('created'),
        # Number of revisions authored by the user (matched by name or
        # openid).
        _select([_func.count(model.Revision.id)], _or_(
            model.Revision.author == model.User.name,
            model.Revision.author == model.User.openid
        )).label('number_of_edits'),
        # Number of packages the user administers.
        _select([_func.count(model.UserObjectRole.id)], _and_(
            model.UserObjectRole.user_id == model.User.id,
            model.UserObjectRole.context == 'Package',
            model.UserObjectRole.role == 'admin'
        )).label('number_administered_packages')
    )

    if q:
        query = model.User.search(q, query, user_name=context.get('user'))

    if order_by == 'edits':
        # Most prolific revision authors first.
        query = query.order_by(_desc(
            _select([_func.count(model.Revision.id)], _or_(
                model.Revision.author == model.User.name,
                model.Revision.author == model.User.openid
            ))
        ))
    else:
        # Sort by fullname, falling back to name when fullname is NULL or
        # empty.
        query = query.order_by(
            _case([(_or_(model.User.fullname == None,
                         model.User.fullname == ''),
                    model.User.name)],
                  else_=model.User.fullname)
        )

    # Filter deleted users
    query = query.filter(model.User.state != model.State.DELETED)

    ## hack for pagination
    if context.get('return_query'):
        return query

    users_list = []
    # Column 0 of each result row is the User object itself.
    for user in query.all():
        result_dict = model_dictize.user_dictize(user[0], context)
        users_list.append(result_dict)

    return users_list
def package_relationships_list(context, data_dict):
    '''Return a dataset (package)'s relationships.

    :param id: the id or name of the first package
    :type id: string
    :param id2: the id or name of the second package
    :type id: string
    :param rel: relationship as string see
      :func:`ckan.logic.action.create.package_relationship_create()` for the
      relationship types (optional)

    :rtype: list of dictionaries
    '''
    ##TODO needs to work with dictization layer
    model = context['model']
    api = context.get('api_version')

    id = _get_or_bust(data_dict, "id")
    id2 = data_dict.get("id2")
    rel = data_dict.get("rel")
    # API v2 refers to packages by id, earlier versions by name.
    ref_package_by = 'id' if api == 2 else 'name'
    pkg1 = model.Package.get(id)
    pkg2 = None
    if not pkg1:
        raise NotFound('First package named in request was not found.')
    if id2:
        pkg2 = model.Package.get(id2)
        if not pkg2:
            raise NotFound('Second package named in address was not found.')

    if rel == 'relationships':
        # 'relationships' is the generic REST route segment, not a real
        # relationship type — treat it as "all types".
        rel = None

    _check_access('package_relationships_list',context, data_dict)

    # TODO: How to handle this object level authz?
    # Currently we don't care
    relationships = pkg1.get_relationships(with_package=pkg2, type=rel)

    if rel and not relationships:
        raise NotFound('Relationship "%s %s %s" not found.'
                       % (id, rel, id2))

    relationship_dicts = [rel.as_dict(pkg1, ref_package_by=ref_package_by)
                          for rel in relationships]

    return relationship_dicts
def package_show(context, data_dict):
    '''Return the metadata of a dataset (package) and its resources.

    :param id: the id or name of the dataset
    :type id: string
    :param use_default_schema: use default package schema instead of
        a custom schema defined with an IDatasetForm plugin (default: False)
    :type use_default_schema: bool

    :rtype: dictionary
    '''
    # FIX: removed leftover debug code that appended a line to the
    # hard-coded path /var/lib/ckan/tnod/log/joe.log on every call (and
    # opened the file without a context manager).
    model = context['model']
    context['session'] = model.Session
    name_or_id = data_dict.get("id") or _get_or_bust(data_dict, 'name_or_id')

    pkg = model.Package.get(name_or_id)

    if pkg is None:
        raise NotFound

    context['package'] = pkg

    _check_access('package_show', context, data_dict)

    if data_dict.get('use_default_schema', False):
        context['schema'] = ckan.logic.schema.default_show_package_schema()

    package_dict = None
    # The search-index cache cannot serve requests pinned to a specific
    # revision.
    use_cache = (context.get('use_cache', True)
                 and not 'revision_id' in context
                 and not 'revision_date' in context)
    if use_cache:
        try:
            search_result = search.show(name_or_id)
        except (search.SearchError, socket.error):
            # Best effort: fall through to dictizing from the database.
            pass
        else:
            # Prefer the pre-validated cached dict unless a custom schema
            # is in play.
            use_validated_cache = 'schema' not in context
            if use_validated_cache and 'validated_data_dict' in search_result:
                package_dict = json.loads(search_result['validated_data_dict'])
                package_dict_validated = True
            else:
                package_dict = json.loads(search_result['data_dict'])
                package_dict_validated = False

            if 'organization' in package_dict:
                org = package_dict['organization']
                image_url = org['image_url']
                if image_url and not image_url.startswith('http'):
                    # Locally uploaded image: build the qualified static URL.
                    image_url = munge.munge_filename(image_url)
                    org['image_display_url'] = h.url_for_static(
                        'uploads/group/%s' % org.get('image_url'),
                        qualified=True
                    )
                package_dict['organization'] = org

            metadata_modified = pkg.metadata_modified.isoformat()
            search_metadata_modified = search_result['metadata_modified']
            # solr stores less precise datetime,
            # truncate to 22 characters to get good enough match
            if metadata_modified[:22] != search_metadata_modified[:22]:
                # Cache is stale — discard it and rebuild from the DB.
                package_dict = None

    if not package_dict:
        package_dict = model_dictize.package_dictize(pkg, context)
        package_dict_validated = False

    # Add page-view tracking summary data to the package dict.
    # If the package_dict came from the Solr cache then it will already have a
    # potentially outdated tracking_summary, this will overwrite it with a
    # current one.
    package_dict['tracking_summary'] = model.TrackingSummary.get_for_package(
        package_dict['id'])

    # Add page-view tracking summary data to the package's resource dicts.
    # If the package_dict came from the Solr cache then each resource dict will
    # already have a potentially outdated tracking_summary, this will overwrite
    # it with a current one.
    for resource_dict in package_dict['resources']:
        _add_tracking_summary_to_resource_dict(resource_dict, model)

    if context.get('for_view'):
        for item in plugins.PluginImplementations(plugins.IPackageController):
            package_dict = item.before_view(package_dict)

    for item in plugins.PluginImplementations(plugins.IPackageController):
        item.read(pkg)

    for resource_dict in package_dict['resources']:
        for item in plugins.PluginImplementations(plugins.IResourceController):
            resource_dict = item.before_show(resource_dict)

    if not package_dict_validated:
        package_plugin = lib_plugins.lookup_package_plugin(package_dict['type'])
        if 'schema' in context:
            schema = context['schema']
        else:
            schema = package_plugin.show_package_schema()
        if schema and context.get('validate', True):
            package_dict, errors = _validate(package_dict, schema,
                                             context=context)

    for item in plugins.PluginImplementations(plugins.IPackageController):
        item.after_show(context, package_dict)

    return package_dict
def _add_tracking_summary_to_resource_dict(resource_dict, model):
    '''Attach page-view tracking summary data to the given resource dict.'''
    resource_dict['tracking_summary'] = \
        model.TrackingSummary.get_for_resource(resource_dict['url'])
def resource_show(context, data_dict):
    '''Return the metadata of a resource.

    :param id: the id of the resource
    :type id: string

    :rtype: dictionary
    '''
    model = context['model']
    resource_id = _get_or_bust(data_dict, 'id')

    resource = model.Resource.get(resource_id)
    context['resource'] = resource
    if not resource:
        raise NotFound

    _check_access('resource_show', context, data_dict)

    resource_dict = model_dictize.resource_dictize(resource, context)
    _add_tracking_summary_to_resource_dict(resource_dict, model)

    # Let resource-controller plugins adjust the dict before returning.
    for plugin in plugins.PluginImplementations(plugins.IResourceController):
        resource_dict = plugin.before_show(resource_dict)

    return resource_dict
def resource_status_show(context, data_dict):
    '''Return the statuses of a resource's tasks.

    :param id: the id of the resource
    :type id: string

    :rtype: list of (status, date_done, traceback, task_status) dictionaries
    '''
    try:
        import ckan.lib.celery_app as celery_app
    except ImportError:
        # Best effort: report the missing queue rather than raising.
        return {'message': 'queue is not installed on this instance'}

    model = context['model']
    id = _get_or_bust(data_dict, 'id')

    _check_access('resource_status_show', context, data_dict)

    # needs to be text query as celery tables are not in our model
    q = _text("""select status, date_done, traceback, task_status.*
                from task_status left join celery_taskmeta
                on task_status.value = celery_taskmeta.task_id and key = 'celery_task_id'
                where entity_id = :entity_id """)

    result = model.Session.connection().execute(q, entity_id=id)
    result_list = [_table_dictize(row, context) for row in result]

    return result_list
@logic.auth_audit_exempt
def revision_show(context, data_dict):
    '''Return the details of a revision.

    :param id: the id of the revision
    :type id: string

    :rtype: dictionary
    '''
    model = context['model']
    revision_id = _get_or_bust(data_dict, 'id')

    # API v2 refers to packages by id, earlier versions by name.
    package_ref_field = 'id' if context.get('api_version') == 2 else 'name'

    revision = model.Session.query(model.Revision).get(revision_id)
    if revision is None:
        raise NotFound

    return model.revision_as_dict(revision, include_packages=True,
                                  ref_package_by=package_ref_field)
def _group_or_org_show(context, data_dict, is_org=False):
    # Shared implementation behind group_show() and organization_show().
    model = context['model']
    id = _get_or_bust(data_dict, 'id')

    group = model.Group.get(id)
    context['group'] = group

    include_datasets = data_dict.get('include_datasets', True)
    if isinstance(include_datasets, basestring):
        # The API may pass the flag as a string ('true'/'1').
        include_datasets = (include_datasets.lower() in ('true', '1'))
    context['include_datasets'] = include_datasets

    if group is None:
        raise NotFound
    # The group endpoint must not expose organizations, and vice versa.
    if is_org and not group.is_organization:
        raise NotFound
    if not is_org and group.is_organization:
        raise NotFound

    if is_org:
        _check_access('organization_show',context, data_dict)
    else:
        _check_access('group_show',context, data_dict)

    group_dict = model_dictize.group_dictize(group, context)

    if is_org:
        plugin_type = plugins.IOrganizationController
    else:
        plugin_type = plugins.IGroupController

    for item in plugins.PluginImplementations(plugin_type):
        item.read(group)

    group_plugin = lib_plugins.lookup_group_plugin(group_dict['type'])
    try:
        # Prefer the plugin's options-aware schema hook; older plugins
        # only provide the plain db_to_form_schema().
        schema = group_plugin.db_to_form_schema_options({
            'type':'show',
            'api': 'api_version' in context,
            'context': context })
    except AttributeError:
        schema = group_plugin.db_to_form_schema()

    group_dict['num_followers'] = logic.get_action('group_follower_count')(
        {'model': model, 'session': model.Session},
        {'id': group_dict['id']})

    if schema:
        group_dict, errors = _validate(group_dict, schema, context=context)
    return group_dict
def group_show(context, data_dict):
    '''Return the details of a group.

    :param id: the id or name of the group
    :type id: string
    :param include_datasets: whether to include a list of the group's
        datasets (optional, default: ``True``)
    :type include_datasets: boolean

    :rtype: dictionary

    .. note:: Only its first 1000 datasets are returned
    '''
    return _group_or_org_show(context, data_dict, is_org=False)
def organization_show(context, data_dict):
    '''Return the details of an organization.

    :param id: the id or name of the organization
    :type id: string
    :param include_datasets: whether to include a list of the organization's
        datasets (optional, default: ``True``)
    :type include_datasets: boolean

    :rtype: dictionary

    .. note:: Only its first 1000 datasets are returned
    '''
    return _group_or_org_show(context, data_dict, is_org=True)
def group_package_show(context, data_dict):
    '''Return the datasets (packages) of a group.

    :param id: the id or name of the group
    :type id: string
    :param limit: the maximum number of datasets to return (optional)
    :type limit: int

    :rtype: list of dictionaries
    '''
    model = context['model']
    group_id = _get_or_bust(data_dict, 'id')

    # FIXME: What if limit is not an int? Schema and validation needed.
    limit = data_dict.get('limit')

    group = model.Group.get(group_id)
    context['group'] = group
    if group is None:
        raise NotFound

    _check_access('group_show', context, data_dict)

    pkg_revs = group.packages(limit=limit,
                              return_query=context.get('return_query'))
    return [model_dictize.package_dictize(pkg_rev, context)
            for pkg_rev in pkg_revs]
def tag_show(context, data_dict):
    '''Return the details of a tag and all its datasets.

    :param id: the name or id of the tag
    :type id: string

    :returns: the details of the tag, including a list of all of the tag's
        datasets and their details
    :rtype: dictionary
    '''
    model = context['model']
    tag_id_or_name = _get_or_bust(data_dict, 'id')

    tag = model.Tag.get(tag_id_or_name)
    context['tag'] = tag
    if tag is None:
        raise NotFound

    _check_access('tag_show', context, data_dict)
    return model_dictize.tag_dictize(tag, context)
def user_show(context, data_dict):
    '''Return a user account.

    Either the ``id`` or the ``user_obj`` parameter must be given.

    :param id: the id or name of the user (optional)
    :type id: string
    :param user_obj: the user dictionary of the user (optional)
    :type user_obj: user dictionary

    :rtype: dictionary
    '''
    model = context['model']

    id = data_dict.get('id',None)
    provided_user = data_dict.get('user_obj',None)
    if id:
        user_obj = model.User.get(id)
        context['user_obj'] = user_obj
        if user_obj is None:
            raise NotFound
    elif provided_user:
        # Caller supplied the user object directly; skip the DB lookup.
        context['user_obj'] = user_obj = provided_user
    else:
        raise NotFound

    _check_access('user_show',context, data_dict)

    user_dict = model_dictize.user_dictize(user_obj,context)

    if context.get('return_minimal'):
        # Skip the expensive activity/dataset aggregation below.
        return user_dict

    # Most recent 20 revisions authored by the user, exposed as 'activity'.
    revisions_q = model.Session.query(model.Revision
            ).filter_by(author=user_obj.name)

    revisions_list = []
    for revision in revisions_q.limit(20).all():
        revision_dict = logic.get_action('revision_show')(context,{'id':revision.id})
        revision_dict['state'] = revision.state
        revisions_list.append(revision_dict)
    user_dict['activity'] = revisions_list

    user_dict['datasets'] = []
    # Up to 300 datasets the user administers; datasets the caller is not
    # authorized to see are silently skipped.
    dataset_q = model.Session.query(model.Package).join(model.PackageRole
            ).filter_by(user=user_obj, role=model.Role.ADMIN
            ).limit(300)

    for dataset in dataset_q:
        try:
            dataset_dict = logic.get_action('package_show')(context, {'id': dataset.id})
        except logic.NotAuthorized:
            continue
        user_dict['datasets'].append(dataset_dict)

    user_dict['num_followers'] = logic.get_action('user_follower_count')(
        {'model': model, 'session': model.Session},
        {'id': user_dict['id']})

    return user_dict
def package_show_rest(context, data_dict):
    '''Return a dataset in the legacy REST-API dictionary format.'''
    _check_access('package_show_rest', context, data_dict)

    # package_show stores the Package object in the context as a side
    # effect; only that object is needed here.
    logic.get_action('package_show')(context, data_dict)
    return model_dictize.package_to_api(context['package'], context)
def group_show_rest(context, data_dict):
    '''Return a group in the legacy REST-API dictionary format.'''
    _check_access('group_show_rest', context, data_dict)

    # group_show stores the Group object in the context as a side effect;
    # only that object is needed here.
    logic.get_action('group_show')(context, data_dict)
    return model_dictize.group_to_api(context['group'], context)
def tag_show_rest(context, data_dict):
    '''Return a tag in the legacy REST-API dictionary format.'''
    _check_access('tag_show_rest', context, data_dict)

    # tag_show stores the Tag object in the context as a side effect;
    # only that object is needed here.
    logic.get_action('tag_show')(context, data_dict)
    return model_dictize.tag_to_api(context['tag'], context)
@logic.validate(logic.schema.default_autocomplete_schema)
def package_autocomplete(context, data_dict):
    '''Return a list of datasets (packages) that match a string.

    Datasets with names or titles that contain the query string will be
    returned.

    :param q: the string to search for
    :type q: string
    :param limit: the maximum number of resource formats to return (optional,
      default: 10)
    :type limit: int

    :rtype: list of dictionaries
    '''
    model = context['model']

    _check_access('package_autocomplete', context, data_dict)

    limit = data_dict.get('limit', 10)
    q = data_dict['q']

    like_q = u"%s%%" % q

    # Case-insensitive prefix match against active, current package
    # revisions, on either name or title.
    query = model.Session.query(model.PackageRevision)
    query = query.filter(model.PackageRevision.state=='active')
    query = query.filter(model.PackageRevision.current==True)
    query = query.filter(_or_(model.PackageRevision.name.ilike(like_q),
                              model.PackageRevision.title.ilike(like_q)))
    query = query.limit(limit)

    q_lower = q.lower()
    pkg_list = []
    for package in query:
        # Report whether the hit was on the name or the title, with a
        # matching display string.
        # NOTE(review): name is compared against the lower-cased query —
        # presumably package names are always lower-case; confirm.
        if package.name.startswith(q_lower):
            match_field = 'name'
            match_displayed = package.name
        else:
            match_field = 'title'
            match_displayed = '%s (%s)' % (package.title, package.name)
        result_dict = {'name':package.name, 'title':package.title,
                       'match_field':match_field, 'match_displayed':match_displayed}
        pkg_list.append(result_dict)

    return pkg_list
@logic.validate(logic.schema.default_autocomplete_schema)
def format_autocomplete(context, data_dict):
    '''Return a list of resource formats whose names contain a string.

    :param q: the string to search for
    :type q: string
    :param limit: the maximum number of resource formats to return (optional,
      default: 5)
    :type limit: int

    :rtype: list of strings
    '''
    model = context['model']
    session = context['session']

    _check_access('format_autocomplete', context, data_dict)

    q = data_dict['q']
    limit = data_dict.get('limit', 5)
    like_q = u'%' + q + u'%'

    # Count occurrences of each matching format over active, current
    # resource revisions, most common formats first.
    query = session.query(model.ResourceRevision.format,
        _func.count(model.ResourceRevision.format).label('total'))\
        .filter(_and_(
            model.ResourceRevision.state == 'active',
            model.ResourceRevision.current == True
        ))\
        .filter(model.ResourceRevision.format.ilike(like_q))\
        .group_by(model.ResourceRevision.format)\
        .order_by('total DESC')\
        .limit(limit)

    return [resource.format.lower() for resource in query]
@logic.validate(logic.schema.default_autocomplete_schema)
def user_autocomplete(context, data_dict):
    '''Return a list of user names that contain a string.

    :param q: the string to search for
    :type q: string
    :param limit: the maximum number of user names to return (optional,
      default: 20)
    :type limit: int

    :rtype: a list of user dictionaries each with keys ``'name'``,
      ``'fullname'``, and ``'id'``
    '''
    # FIX: removed the unused local `user = context['user']`, which was
    # never read and was shadowed by the loop variable below.
    model = context['model']

    _check_access('user_autocomplete', context, data_dict)

    q = data_dict['q']
    limit = data_dict.get('limit', 20)

    query = model.User.search(q)
    # Exclude deleted accounts from the suggestions.
    query = query.filter(model.User.state != model.State.DELETED)
    query = query.limit(limit)

    return [dict((k, getattr(user, k)) for k in ('id', 'name', 'fullname'))
            for user in query.all()]
def package_search(context, data_dict):
    '''
    Searches for packages satisfying a given search criteria.
    This action accepts solr search query parameters (details below), and
    returns a dictionary of results, including dictized datasets that match
    the search criteria, a search count and also facet information.
    **Solr Parameters:**
    For more in depth treatment of each paramter, please read the `Solr
    Documentation <http://wiki.apache.org/solr/CommonQueryParameters>`_.
    This action accepts a *subset* of solr's search query parameters:
    :param q: the solr query. Optional. Default: `"*:*"`
    :type q: string
    :param fq: any filter queries to apply. Note: `+site_id:{ckan_site_id}`
        is added to this string prior to the query being executed.
    :type fq: string
    :param sort: sorting of the search results. Optional. Default:
        'relevance asc, metadata_modified desc'. As per the solr
        documentation, this is a comma-separated string of field names and
        sort-orderings.
    :type sort: string
    :param rows: the number of matching rows to return.
    :type rows: int
    :param start: the offset in the complete result for where the set of
        returned datasets should begin.
    :type start: int
    :param facet: whether to enable faceted results. Default: "true".
    :type facet: string
    :param facet.mincount: the minimum counts for facet fields should be
        included in the results.
    :type facet.mincount: int
    :param facet.limit: the maximum number of values the facet fields return.
        A negative value means unlimited. This can be set instance-wide with
        the :ref:`search.facets.limit` config option. Default is 50.
    :type facet.limit: int
    :param facet.field: the fields to facet upon. Default empty. If empty,
        then the returned facet information is empty.
    :type facet.field: list of strings
    The following advanced Solr parameters are supported as well. Note that
    some of these are only available on particular Solr versions. See Solr's
    `dismax`_ and `edismax`_ documentation for further details on them:
    ``qf``, ``wt``, ``bf``, ``boost``, ``tie``, ``defType``, ``mm``
    .. _dismax: http://wiki.apache.org/solr/DisMaxQParserPlugin
    .. _edismax: http://wiki.apache.org/solr/ExtendedDisMax
    **Results:**
    The result of this action is a dict with the following keys:
    :rtype: A dictionary with the following keys
    :param count: the number of results found. Note, this is the total number
        of results found, not the total number of results returned (which is
        affected by limit and row parameters used in the input).
    :type count: int
    :param results: ordered list of datasets matching the query, where the
        ordering defined by the sort parameter used in the query.
    :type results: list of dictized datasets.
    :param facets: DEPRECATED. Aggregated information about facet counts.
    :type facets: DEPRECATED dict
    :param search_facets: aggregated information about facet counts. The outer
        dict is keyed by the facet field name (as used in the search query).
        Each entry of the outer dict is itself a dict, with a "title" key, and
        an "items" key. The "items" key's value is a list of dicts, each with
        "count", "display_name" and "name" entries. The display_name is a
        form of the name that can be used in titles.
    :type search_facets: nested dict of dicts.
    :param use_default_schema: use default package schema instead of
        a custom schema defined with an IDatasetForm plugin (default: False)
    :type use_default_schema: bool
    An example result: ::
        {'count': 2,
         'results': [ { <snip> }, { <snip> }],
         'search_facets': {u'tags': {'items': [{'count': 1,
                                                'display_name': u'tolstoy',
                                                'name': u'tolstoy'},
                                               {'count': 2,
                                                'display_name': u'russian',
                                                'name': u'russian'}
                                              ]
                                    }
                          }
        }
    **Limitations:**
    The full solr query language is not exposed, including.
    fl
        The parameter that controls which fields are returned in the solr
        query cannot be changed. CKAN always returns the matched datasets as
        dictionary objects.
    '''
    # sometimes context['schema'] is None
    schema = (context.get('schema') or
              logic.schema.default_package_search_schema())
    data_dict, errors = _validate(data_dict, schema, context)
    # put the extras back into the data_dict so that the search can
    # report needless parameters
    data_dict.update(data_dict.get('__extras', {}))
    data_dict.pop('__extras', None)
    if errors:
        raise ValidationError(errors)
    model = context['model']
    session = context['session']
    _check_access('package_search', context, data_dict)
    # Move ext_ params to extras and remove them from the root of the search
    # params, so they don't cause and error
    data_dict['extras'] = data_dict.get('extras', {})
    for key in [key for key in data_dict.keys() if key.startswith('ext_')]:
        data_dict['extras'][key] = data_dict.pop(key)
    # check if some extension needs to modify the search params
    for item in plugins.PluginImplementations(plugins.IPackageController):
        data_dict = item.before_search(data_dict)
    # the extension may have decided that it is not necessary to perform
    # the query
    abort = data_dict.get('abort_search', False)
    if data_dict.get('sort') in (None, 'rank'):
        data_dict['sort'] = 'score desc, metadata_modified desc'
    results = []
    if not abort:
        # 'use_default_schema' selects which stored Solr field is fetched:
        # the raw 'data_dict' or the IDatasetForm-validated one.
        data_source = 'data_dict' if data_dict.get('use_default_schema',
            False) else 'validated_data_dict'
        # return a list of package ids
        data_dict['fl'] = 'id {0}'.format(data_source)
        # If this query hasn't come from a controller that has set this flag
        # then we should remove any mention of capacity from the fq and
        # instead set it to only retrieve public datasets
        fq = data_dict.get('fq', '')
        if not context.get('ignore_capacity_check', False):
            fq = ' '.join(p for p in fq.split(' ')
                          if not 'capacity:' in p)
            data_dict['fq'] = fq + ' capacity:"public"'
        # Pop these ones as Solr does not need them
        extras = data_dict.pop('extras', None)
        query = search.query_for(model.Package)
        query.run(data_dict)
        # Add them back so extensions can use them on after_search
        data_dict['extras'] = extras
        for package in query.results:
            # get the package object
            package, package_dict = package['id'], package.get(data_source)
            pkg_query = session.query(model.PackageRevision)\
                .filter(model.PackageRevision.id == package)\
                .filter(_and_(
                    model.PackageRevision.state == u'active',
                    model.PackageRevision.current == True
                ))
            pkg = pkg_query.first()
            ## if the index has got a package that is not in ckan then
            ## ignore it.
            if not pkg:
                log.warning('package %s in index but not in database' % package)
                continue
            ## use data in search index if there
            if package_dict:
                ## the package_dict still needs translating when being viewed
                package_dict = json.loads(package_dict)
                if context.get('for_view'):
                    for item in plugins.PluginImplementations(
                            plugins.IPackageController):
                        package_dict = item.before_view(package_dict)
                results.append(package_dict)
            else:
                # Fall back to dictizing the database object when the index
                # did not store the requested data_source field.
                results.append(model_dictize.package_dictize(pkg, context))
        count = query.count
        facets = query.facets
    else:
        count = 0
        facets = {}
        results = []
    search_results = {
        'count': count,
        'facets': facets,
        'results': results,
        'sort': data_dict['sort']
    }
    # Transform facets into a more useful data structure.
    restructured_facets = {}
    for key, value in facets.items():
        restructured_facets[key] = {
            'title': key,
            'items': []
        }
        for key_, value_ in value.items():
            new_facet_dict = {}
            new_facet_dict['name'] = key_
            if key in ('groups', 'organization'):
                # Resolve group/organization names to their display titles.
                group = model.Group.get(key_)
                if group:
                    new_facet_dict['display_name'] = group.display_name
                else:
                    new_facet_dict['display_name'] = key_
            elif key == 'license_id':
                license = model.Package.get_license_register().get(key_)
                if license:
                    new_facet_dict['display_name'] = license.title
                else:
                    new_facet_dict['display_name'] = key_
            else:
                new_facet_dict['display_name'] = key_
            new_facet_dict['count'] = value_
            restructured_facets[key]['items'].append(new_facet_dict)
    search_results['search_facets'] = restructured_facets
    # check if some extension needs to modify the search results
    for item in plugins.PluginImplementations(plugins.IPackageController):
        search_results = item.after_search(search_results, data_dict)
    # After extensions have had a chance to modify the facets, sort them by
    # display name.
    for facet in search_results['search_facets']:
        search_results['search_facets'][facet]['items'] = sorted(
            search_results['search_facets'][facet]['items'],
            key=lambda facet: facet['display_name'], reverse=True)
    return search_results
@logic.validate(logic.schema.default_resource_search_schema)
def resource_search(context, data_dict):
    '''
    Searches for resources satisfying a given search criteria.
    It returns a dictionary with 2 fields: ``count`` and ``results``. The
    ``count`` field contains the total number of Resources found without the
    limit or query parameters having an effect. The ``results`` field is a
    list of dictized Resource objects.
    The 'query' parameter is a required field. It is a string of the form
    ``{field}:{term}`` or a list of strings, each of the same form. Within
    each string, ``{field}`` is a field or extra field on the Resource domain
    object.
    If ``{field}`` is ``"hash"``, then an attempt is made to match the
    `{term}` as a *prefix* of the ``Resource.hash`` field.
    If ``{field}`` is an extra field, then an attempt is made to match against
    the extra fields stored against the Resource.
    Note: The search is limited to search against extra fields declared in
    the config setting ``ckan.extra_resource_fields``.
    Note: Due to a Resource's extra fields being stored as a json blob, the
    match is made against the json string representation. As such, false
    positives may occur:
    If the search criteria is: ::
        query = "field1:term1"
    Then a json blob with the string representation of: ::
        {"field1": "foo", "field2": "term1"}
    will match the search criteria! This is a known short-coming of this
    approach.
    All matches are made ignoring case; and apart from the ``"hash"`` field,
    a term matches if it is a substring of the field's value.
    Finally, when specifying more than one search criteria, the criteria are
    AND-ed together.
    The ``order`` parameter is used to control the ordering of the results.
    Currently only ordering one field is available, and in ascending order
    only.
    The ``fields`` parameter is deprecated as it is not compatible with calling
    this action with a GET request to the action API.
    The context may contain a flag, `search_query`, which if True will make
    this action behave as if being used by the internal search api. ie - the
    results will not be dictized, and SearchErrors are thrown for bad search
    queries (rather than ValidationErrors).
    :param query: The search criteria. See above for description.
    :type query: string or list of strings of the form "{field}:{term1}"
    :param fields: Deprecated
    :type fields: dict of fields to search terms.
    :param order_by: A field on the Resource model that orders the results.
    :type order_by: string
    :param offset: Apply an offset to the query.
    :type offset: int
    :param limit: Apply a limit to the query.
    :type limit: int
    :returns: A dictionary with a ``count`` field, and a ``results`` field.
    :rtype: dict
    '''
    model = context['model']
    # Allow either the `query` or `fields` parameter to be given, but not both.
    # Once `fields` parameter is dropped, this can be made simpler.
    # The result of all this gumpf is to populate the local `fields` variable
    # with mappings from field names to list of search terms, or a single
    # search-term string.
    query = data_dict.get('query')
    fields = data_dict.get('fields')
    if query is None and fields is None:
        raise ValidationError({'query': _('Missing value')})
    elif query is not None and fields is not None:
        raise ValidationError(
            {'fields': _('Do not specify if using "query" parameter')})
    elif query is not None:
        if isinstance(query, basestring):
            query = [query]
        try:
            fields = dict(pair.split(":", 1) for pair in query)
        except ValueError:
            raise ValidationError(
                {'query': _('Must be <field>:<value> pair(s)')})
    else:
        log.warning('Use of the "fields" parameter in resource_search is '
                    'deprecated. Use the "query" parameter instead')
        # The legacy fields paramter splits string terms.
        # So maintain that behaviour
        split_terms = {}
        for field, terms in fields.items():
            if isinstance(terms, basestring):
                terms = terms.split()
            split_terms[field] = terms
        fields = split_terms
    order_by = data_dict.get('order_by')
    offset = data_dict.get('offset')
    limit = data_dict.get('limit')
    # Only resources attached to active, public packages are searchable.
    q = model.Session.query(model.Resource).join(model.ResourceGroup).join(model.Package)
    q = q.filter(model.Package.state == 'active')
    q = q.filter(model.Package.private == False)
    q = q.filter(model.Resource.state == 'active')
    resource_fields = model.Resource.get_columns()
    for field, terms in fields.items():
        if isinstance(terms, basestring):
            terms = [terms]
        if field not in resource_fields:
            msg = _('Field "{field}" not recognised in resource_search.')\
                .format(field=field)
            # Running in the context of the internal search api.
            if context.get('search_query', False):
                raise search.SearchError(msg)
            # Otherwise, assume we're in the context of an external api
            # and need to provide meaningful external error messages.
            raise ValidationError({'query': msg})
        for term in terms:
            # prevent pattern injection
            term = misc.escape_sql_like_special_characters(term)
            model_attr = getattr(model.Resource, field)
            # Treat the has field separately, see docstring.
            if field == 'hash':
                q = q.filter(model_attr.ilike(unicode(term) + '%'))
            # Resource extras are stored in a json blob. So searching for
            # matching fields is a bit trickier. See the docstring.
            elif field in model.Resource.get_extra_columns():
                model_attr = getattr(model.Resource, 'extras')
                # Two patterns: the key/value pair followed by a comma
                # (mid-blob) or by a closing brace (end of blob).
                like = _or_(
                    model_attr.ilike(u'''%%"%s": "%%%s%%",%%''' % (field, term)),
                    model_attr.ilike(u'''%%"%s": "%%%s%%"}''' % (field, term))
                )
                q = q.filter(like)
            # Just a regular field
            else:
                q = q.filter(model_attr.ilike('%' + unicode(term) + '%'))
    if order_by is not None:
        if hasattr(model.Resource, order_by):
            q = q.order_by(getattr(model.Resource, order_by))
    # Count before pagination so 'count' reflects the full result set.
    count = q.count()
    q = q.offset(offset)
    q = q.limit(limit)
    results = []
    for result in q:
        if isinstance(result, tuple) and isinstance(result[0], model.DomainObject):
            # This is the case for order_by rank due to the add_column.
            results.append(result[0])
        else:
            results.append(result)
    # If run in the context of a search query, then don't dictize the results.
    if not context.get('search_query', False):
        results = model_dictize.resource_list_dictize(results, context)
    return {'count': count,
            'results': results}
def _tag_search(context, data_dict):
    '''Shared implementation behind tag_search and tag_autocomplete.

    Returns a (tag objects, total count) tuple; the count ignores the
    offset/limit pagination parameters.
    '''
    model = context['model']
    search_terms = data_dict.get('query') or data_dict.get('q') or []
    if isinstance(search_terms, basestring):
        search_terms = [search_terms]
    search_terms = [term.strip() for term in search_terms if term.strip()]
    if 'fields' in data_dict:
        log.warning('"fields" parameter is deprecated. '
                    'Use the "query" parameter instead')
    fields = data_dict.get('fields', {})
    offset = data_dict.get('offset')
    limit = data_dict.get('limit')
    # TODO: should we check for user authentication first?
    tag_query = model.Session.query(model.Tag)
    if 'vocabulary_id' in data_dict:
        # Filter by vocabulary.
        vocab = model.Vocabulary.get(_get_or_bust(data_dict, 'vocabulary_id'))
        if not vocab:
            raise NotFound
        tag_query = tag_query.filter(model.Tag.vocabulary_id == vocab.id)
    else:
        # If no vocabulary_name in data dict then show free tags only.
        tag_query = tag_query.filter(model.Tag.vocabulary_id == None)
        # If we're searching free tags, limit results to tags that are
        # currently applied to a package.
        tag_query = tag_query.distinct().join(model.Tag.package_tags)
    for field, value in fields.items():
        if field in ('tag', 'tags'):
            search_terms.append(value)
    if not len(search_terms):
        return [], 0
    for term in search_terms:
        escaped_term = misc.escape_sql_like_special_characters(
            term, escape='\\')
        tag_query = tag_query.filter(
            model.Tag.name.ilike('%' + escaped_term + '%'))
    total = tag_query.count()
    tag_query = tag_query.offset(offset)
    tag_query = tag_query.limit(limit)
    return tag_query.all(), total
def tag_search(context, data_dict):
    '''Return a list of tags whose names contain a given string.

    By default only free tags (tags that don't belong to any vocabulary) are
    searched. If the ``vocabulary_id`` argument is given then only tags
    belonging to that vocabulary will be searched instead.

    :param query: the string(s) to search for
    :type query: string or list of strings
    :param vocabulary_id: the id or name of the tag vocabulary to search in
        (optional)
    :type vocabulary_id: string
    :param fields: deprecated
    :type fields: dictionary
    :param limit: the maximum number of tags to return
    :type limit: int
    :param offset: when ``limit`` is given, the offset to start returning tags
        from
    :type offset: int

    :returns: A dictionary with the following keys:
        ``'count'``
          The number of tags in the result.
        ``'results'``
          The list of tags whose names contain the given string, a list of
          dictionaries.
    :rtype: dictionary
    '''
    matching_tags, total = _tag_search(context, data_dict)
    dictized_tags = [_table_dictize(tag, context) for tag in matching_tags]
    return {'count': total, 'results': dictized_tags}
def tag_autocomplete(context, data_dict):
    '''Return a list of tag names that contain a given string.

    By default only free tags (tags that don't belong to any vocabulary) are
    searched. If the ``vocabulary_id`` argument is given then only tags
    belonging to that vocabulary will be searched instead.

    :param query: the string to search for
    :type query: string
    :param vocabulary_id: the id or name of the tag vocabulary to search in
        (optional)
    :type vocabulary_id: string
    :param fields: deprecated
    :type fields: dictionary
    :param limit: the maximum number of tags to return
    :type limit: int
    :param offset: when ``limit`` is given, the offset to start returning tags
        from
    :type offset: int

    :rtype: list of strings
    '''
    _check_access('tag_autocomplete', context, data_dict)
    # The total count returned by the helper is not needed here.
    matching_tags, _unused_count = _tag_search(context, data_dict)
    return [tag.name for tag in matching_tags] if matching_tags else []
def task_status_show(context, data_dict):
    '''Return a task status.

    Either the ``id`` parameter *or* the ``entity_id``, ``task_type`` *and*
    ``key`` parameters must be given.

    :param id: the id of the task status (optional)
    :type id: string
    :param entity_id: the entity_id of the task status (optional)
    :type entity_id: string
    :param task_type: the task_type of the task status (optional)
    :type tast_type: string
    :param key: the key of the task status (optional)
    :type key: string

    :rtype: dictionary
    '''
    model = context['model']
    task_id = data_dict.get('id')
    if task_id:
        task_status = model.TaskStatus.get(task_id)
    else:
        # No id supplied: look the record up by its unique
        # (entity_id, task_type, key) triple instead.
        lookup = _and_(
            model.TaskStatus.entity_id == _get_or_bust(data_dict,
                                                       'entity_id'),
            model.TaskStatus.task_type == _get_or_bust(data_dict,
                                                       'task_type'),
            model.TaskStatus.key == _get_or_bust(data_dict, 'key'))
        task_status = model.Session.query(model.TaskStatus)\
            .filter(lookup).first()
    context['task_status'] = task_status
    _check_access('task_status_show', context, data_dict)
    if task_status is None:
        raise NotFound
    return model_dictize.task_status_dictize(task_status, context)
def term_translation_show(context, data_dict):
    '''Return the translations for the given term(s) and language(s).

    :param terms: the terms to search for translations of, e.g. ``'Russian'``,
        ``'romantic novel'``
    :type terms: list of strings
    :param lang_codes: the language codes of the languages to search for
        translations into, e.g. ``'en'``, ``'de'`` (optional, default is to
        search for translations into any language)
    :type lang_codes: list of language code strings

    :rtype: a list of term translation dictionaries each with keys ``'term'``
        (the term searched for, in the source language), ``'term_translation'``
        (the translation of the term into the target language) and
        ``'lang_code'`` (the language code of the target language)
    '''
    model = context['model']
    trans_table = model.term_translation_table
    statement = _select([trans_table])
    if 'terms' not in data_dict:
        raise ValidationError({'terms': 'terms not in data'})
    # This action accepts `terms` as either a list of strings, or a single
    # string.
    terms = _get_or_bust(data_dict, 'terms')
    if isinstance(terms, basestring):
        terms = [terms]
    if terms:
        statement = statement.where(trans_table.c.term.in_(terms))
    # This action accepts `lang_codes` as either a list of strings, or a
    # single string.
    if 'lang_codes' in data_dict:
        lang_codes = _get_or_bust(data_dict, 'lang_codes')
        if isinstance(lang_codes, basestring):
            lang_codes = [lang_codes]
        statement = statement.where(
            trans_table.c.lang_code.in_(lang_codes))
    rows = model.Session.connection().execute(statement)
    return [_table_dictize(row, context) for row in rows]
# Only internal services are allowed to call get_site_user.
def get_site_user(context, data_dict):
    '''Return the name and API key of the site user, creating it if missing.

    :rtype: dictionary with ``'name'`` and ``'apikey'`` keys
    '''
    _check_access('get_site_user', context, data_dict)
    model = context['model']
    site_id = config.get('ckan.site_id', 'ckan_site_user')
    site_user = model.User.get(site_id)
    if not site_user:
        # First call on this site: create the sysadmin site user with a
        # freshly generated API key.
        generated_key = str(uuid.uuid4())
        site_user = model.User(name=site_id,
                               password=generated_key,
                               apikey=generated_key)
        # make sysadmin
        site_user.sysadmin = True
        model.Session.add(site_user)
        model.Session.flush()
        if not context.get('defer_commit'):
            model.repo.commit_and_remove()
    return {'name': site_user.name,
            'apikey': site_user.apikey}
def roles_show(context, data_dict):
    '''Return the roles of all users and authorization groups for an object.

    :param domain_object: a package or group name or id
        to filter the results by
    :type domain_object: string
    :param user: a user name or id
    :type user: string

    :rtype: list of dictionaries
    '''
    model = context['model']
    session = context['session']
    domain_object_ref = _get_or_bust(data_dict, 'domain_object')
    user_ref = data_dict.get('user')
    domain_object = ckan.logic.action.get_domain_object(model,
                                                        domain_object_ref)
    # Pick the role table matching the kind of domain object requested.
    if isinstance(domain_object, model.Package):
        role_query = session.query(model.PackageRole).join('package')
    elif isinstance(domain_object, model.Group):
        role_query = session.query(model.GroupRole).join('group')
    elif domain_object is model.System:
        role_query = session.query(model.SystemRole)
    else:
        raise NotFound(_('Cannot list entity of this type: %s')
                       % type(domain_object).__name__)
    # Filter by the domain_obj (apart from if it is the system object)
    if not isinstance(domain_object, type):
        role_query = role_query.filter_by(id=domain_object.id)
    # Filter by the user
    user = None
    if user_ref:
        user = model.User.get(user_ref)
        if not user:
            raise NotFound(_('unknown user:') + repr(user_ref))
        role_query = role_query.join('user').filter_by(id=user.id)
    uors_dictized = [_table_dictize(uor, context)
                     for uor in role_query.all()]
    result = {
        'domain_object_type': type(domain_object).__name__,
        'domain_object_id': (domain_object.id
                             if domain_object != model.System else None),
        'roles': uors_dictized,
    }
    if user_ref:
        result['user'] = user.id
    return result
def status_show(context, data_dict):
    '''Return a dictionary with information about the site's configuration.

    :rtype: dictionary
    '''
    return {
        'site_title': config.get('ckan.site_title'),
        'site_description': config.get('ckan.site_description'),
        'site_url': config.get('ckan.site_url'),
        'ckan_version': ckan.__version__,
        'error_emails_to': config.get('email_to'),
        'locale_default': config.get('ckan.locale_default'),
        # Default to '' so that .split() cannot raise AttributeError when
        # the 'ckan.plugins' option is unset (config.get would return None).
        'extensions': config.get('ckan.plugins', '').split(),
    }
def vocabulary_list(context, data_dict):
    '''Return a list of all the site's tag vocabularies.

    :rtype: list of dictionaries
    '''
    model = context['model']
    vocabularies = model.Session.query(model.Vocabulary).all()
    return model_dictize.vocabulary_list_dictize(vocabularies, context)
def vocabulary_show(context, data_dict):
    '''Return a single tag vocabulary.

    :param id: the id or name of the vocabulary
    :type id: string

    :return: the vocabulary.
    :rtype: dictionary
    '''
    model = context['model']
    vocab_id = data_dict.get('id')
    if not vocab_id:
        raise ValidationError({'id': _('id not in data')})
    found = model.vocabulary.Vocabulary.get(vocab_id)
    if found is None:
        raise NotFound(_('Could not find vocabulary "%s"') % vocab_id)
    return model_dictize.vocabulary_dictize(found, context)
@logic.validate(logic.schema.default_activity_list_schema)
def user_activity_list(context, data_dict):
    '''Return a user's public activity stream.

    You must be authorized to view the user's profile.

    :param id: the id or name of the user
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('user_show', context, data_dict)
    model = context['model']
    user_ref = data_dict.get('id')  # May be user name or id.
    user = model.User.get(user_ref)
    if user is None:
        raise logic.NotFound
    # Coerce offset to int for consistency with package_activity_list --
    # the value may arrive as a string via the API.
    offset = int(data_dict.get('offset', 0))
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    activity_objects = model.activity.user_activity_list(user.id, limit=limit,
                                                         offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
@logic.validate(logic.schema.default_activity_list_schema)
def package_activity_list(context, data_dict):
    '''Return a package's activity stream.

    You must be authorized to view the package.

    :param id: the id or name of the package
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('package_show', context, data_dict)
    model = context['model']
    package = model.Package.get(data_dict.get('id'))  # May be name or ID.
    if package is None:
        raise logic.NotFound
    offset = int(data_dict.get('offset', 0))
    default_limit = config.get('ckan.activity_list_limit', 31)
    limit = int(data_dict.get('limit', default_limit))
    activity_objects = model.activity.package_activity_list(
        package.id, limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
@logic.validate(logic.schema.default_activity_list_schema)
def group_activity_list(context, data_dict):
    '''Return a group's activity stream.

    You must be authorized to view the group.

    :param id: the id or name of the group
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('group_show', context, data_dict)
    model = context['model']
    group_id = data_dict.get('id')
    # Coerce offset to int for consistency with package_activity_list --
    # the value may arrive as a string via the API.
    offset = int(data_dict.get('offset', 0))
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    # Convert group_id (could be id or name) into id.
    group_show = logic.get_action('group_show')
    group_id = group_show(context, {'id': group_id})['id']
    activity_objects = model.activity.group_activity_list(group_id,
        limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
@logic.validate(logic.schema.default_activity_list_schema)
def organization_activity_list(context, data_dict):
    '''Return an organization's activity stream.

    :param id: the id or name of the organization
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    _check_access('organization_show', context, data_dict)
    model = context['model']
    org_id = data_dict.get('id')
    # Coerce offset to int for consistency with package_activity_list --
    # the value may arrive as a string via the API.
    offset = int(data_dict.get('offset', 0))
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    # Convert org_id (could be id or name) into id.
    org_show = logic.get_action('organization_show')
    org_id = org_show(context, {'id': org_id})['id']
    # Organizations are groups under the hood, so reuse the group helper.
    activity_objects = model.activity.group_activity_list(org_id,
        limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
@logic.validate(logic.schema.default_pagination_schema)
def recently_changed_packages_activity_list(context, data_dict):
    '''Return the activity stream of all recently added or changed packages.

    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: list of dictionaries
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    model = context['model']
    # Coerce offset to int for consistency with package_activity_list --
    # the value may arrive as a string via the API.
    offset = int(data_dict.get('offset', 0))
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    activity_objects = model.activity.recently_changed_packages_activity_list(
        limit=limit, offset=offset)
    return model_dictize.activity_list_dictize(activity_objects, context)
def activity_detail_list(context, data_dict):
    '''Return an activity's list of activity detail items.

    :param id: the id of the activity
    :type id: string

    :rtype: list of dictionaries.
    '''
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    model = context['model']
    activity_id = _get_or_bust(data_dict, 'id')
    detail_objects = model.ActivityDetail.by_activity_id(activity_id)
    return model_dictize.activity_detail_list_dictize(
        detail_objects, context)
def user_activity_list_html(context, data_dict):
    '''Return a user's public activity stream as HTML.

    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.

    :param id: The id or name of the user.
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: string
    '''
    stream = user_activity_list(context, data_dict)
    template_vars = {
        'controller': 'user',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_streams.activity_list_to_html(
        context, stream, template_vars)
def package_activity_list_html(context, data_dict):
    '''Return a package's activity stream as HTML.

    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.

    :param id: the id or name of the package
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: string
    '''
    stream = package_activity_list(context, data_dict)
    template_vars = {
        'controller': 'package',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_streams.activity_list_to_html(
        context, stream, template_vars)
def group_activity_list_html(context, data_dict):
    '''Return a group's activity stream as HTML.

    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.

    :param id: the id or name of the group
    :type id: string
    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: string
    '''
    stream = group_activity_list(context, data_dict)
    template_vars = {
        'controller': 'group',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_streams.activity_list_to_html(
        context, stream, template_vars)
def organization_activity_list_html(context, data_dict):
    '''Return an organization's activity stream as HTML.

    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.

    :param id: the id or name of the organization
    :type id: string

    :rtype: string

    '''
    activities = organization_activity_list(context, data_dict)
    template_vars = {
        'controller': 'organization',
        'action': 'activity',
        'id': data_dict['id'],
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_streams.activity_list_to_html(
        context, activities, template_vars)
def recently_changed_packages_activity_list_html(context, data_dict):
    '''Return the activity stream of all recently changed packages as HTML.

    The activity stream includes all recently added or changed packages. It is
    rendered as a snippet of HTML meant to be included in an HTML page, i.e. it
    doesn't have any HTML header or footer.

    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: string

    '''
    activities = recently_changed_packages_activity_list(context, data_dict)
    # No 'id' here: this stream is site-wide rather than per-object.
    template_vars = {
        'controller': 'package',
        'action': 'activity',
        'offset': int(data_dict.get('offset', 0)),
    }
    return activity_streams.activity_list_to_html(
        context, activities, template_vars)
def _follower_count(context, data_dict, default_schema, ModelClass):
    '''Validate ``data_dict`` and return ``ModelClass``'s follower count.

    Shared implementation for the ``*_follower_count`` actions.
    '''
    validated, errors = _validate(
        data_dict, context.get('schema', default_schema), context)
    if errors:
        raise ValidationError(errors)
    return ModelClass.follower_count(validated['id'])
def user_follower_count(context, data_dict):
    '''Return the number of followers of a user.

    :param id: the id or name of the user
    :type id: string

    :rtype: int

    '''
    schema = ckan.logic.schema.default_follow_user_schema()
    follower_model = context['model'].UserFollowingUser
    return _follower_count(context, data_dict, schema, follower_model)
def dataset_follower_count(context, data_dict):
    '''Return the number of followers of a dataset.

    :param id: the id or name of the dataset
    :type id: string

    :rtype: int

    '''
    schema = ckan.logic.schema.default_follow_dataset_schema()
    follower_model = context['model'].UserFollowingDataset
    return _follower_count(context, data_dict, schema, follower_model)
def group_follower_count(context, data_dict):
    '''Return the number of followers of a group.

    :param id: the id or name of the group
    :type id: string

    :rtype: int

    '''
    schema = ckan.logic.schema.default_follow_group_schema()
    follower_model = context['model'].UserFollowingGroup
    return _follower_count(context, data_dict, schema, follower_model)
def _follower_list(context, data_dict, default_schema, FollowerClass):
    '''Validate ``data_dict`` and return the object's followers as user dicts.

    Shared implementation for the ``*_follower_list`` actions.
    '''
    schema = context.get('schema', default_schema)
    data_dict, errors = _validate(data_dict, schema, context)
    if errors:
        raise ValidationError(errors)
    model = context['model']
    followers = FollowerClass.follower_list(data_dict.get('id'))
    # Resolve each Follower record to its User object, dropping any
    # followers whose user account no longer exists.
    users = [u for u in
             (model.User.get(f.follower_id) for f in followers)
             if u is not None]
    return model_dictize.user_list_dictize(users, context)
def user_follower_list(context, data_dict):
    '''Return the list of users that are following the given user.

    :param id: the id or name of the user
    :type id: string

    :rtype: list of dictionaries

    '''
    _check_access('user_follower_list', context, data_dict)
    schema = ckan.logic.schema.default_follow_user_schema()
    follower_model = context['model'].UserFollowingUser
    return _follower_list(context, data_dict, schema, follower_model)
def dataset_follower_list(context, data_dict):
    '''Return the list of users that are following the given dataset.

    :param id: the id or name of the dataset
    :type id: string

    :rtype: list of dictionaries

    '''
    _check_access('dataset_follower_list', context, data_dict)
    schema = ckan.logic.schema.default_follow_dataset_schema()
    follower_model = context['model'].UserFollowingDataset
    return _follower_list(context, data_dict, schema, follower_model)
def group_follower_list(context, data_dict):
    '''Return the list of users that are following the given group.

    :param id: the id or name of the group
    :type id: string

    :rtype: list of dictionaries

    '''
    _check_access('group_follower_list', context, data_dict)
    schema = ckan.logic.schema.default_follow_group_schema()
    follower_model = context['model'].UserFollowingGroup
    return _follower_list(context, data_dict, schema, follower_model)
def _am_following(context, data_dict, default_schema, FollowerClass):
    '''Return whether the authorized user follows the object in ``data_dict``.

    Shared implementation for the ``am_following_*`` actions.
    '''
    data_dict, errors = _validate(
        data_dict, context.get('schema', default_schema), context)
    if errors:
        raise ValidationError(errors)
    # This action only makes sense for a logged-in user.
    if 'user' not in context:
        raise logic.NotAuthorized
    userobj = context['model'].User.get(context['user'])
    if not userobj:
        raise logic.NotAuthorized
    return FollowerClass.is_following(userobj.id, data_dict.get('id'))
def am_following_user(context, data_dict):
    '''Return ``True`` if you're following the given user, ``False`` if not.

    :param id: the id or name of the user
    :type id: string

    :rtype: boolean

    '''
    schema = ckan.logic.schema.default_follow_user_schema()
    follower_model = context['model'].UserFollowingUser
    return _am_following(context, data_dict, schema, follower_model)
def am_following_dataset(context, data_dict):
    '''Return ``True`` if you're following the given dataset, ``False`` if not.

    :param id: the id or name of the dataset
    :type id: string

    :rtype: boolean

    '''
    schema = ckan.logic.schema.default_follow_dataset_schema()
    follower_model = context['model'].UserFollowingDataset
    return _am_following(context, data_dict, schema, follower_model)
def am_following_group(context, data_dict):
    '''Return ``True`` if you're following the given group, ``False`` if not.

    :param id: the id or name of the group
    :type id: string

    :rtype: boolean

    '''
    schema = ckan.logic.schema.default_follow_group_schema()
    follower_model = context['model'].UserFollowingGroup
    return _am_following(context, data_dict, schema, follower_model)
def _followee_count(context, data_dict, FollowerClass):
    '''Return how many objects the user in ``data_dict`` follows.

    Validation can be skipped with ``context['skip_validation']`` when the
    caller has already validated ``data_dict``.
    '''
    if not context.get('skip_validation'):
        default = ckan.logic.schema.default_follow_user_schema()
        data_dict, errors = _validate(
            data_dict, context.get('schema', default), context)
        if errors:
            raise ValidationError(errors)
    return FollowerClass.followee_count(data_dict['id'])
def followee_count(context, data_dict):
    '''Return the number of objects that are followed by the given user.

    Counts all objects, of any type, that the given user is following
    (e.g. followed users, followed datasets, followed groups).

    :param id: the id of the user
    :type id: string

    :rtype: int

    '''
    model = context['model']
    total = _followee_count(context, data_dict, model.UserFollowingUser)
    # The first call above validated data_dict, so the remaining calls can
    # skip validation.
    context['skip_validation'] = True
    total += _followee_count(context, data_dict, model.UserFollowingDataset)
    total += _followee_count(context, data_dict, model.UserFollowingGroup)
    return total
def user_followee_count(context, data_dict):
    '''Return the number of users that are followed by the given user.

    :param id: the id of the user
    :type id: string

    :rtype: int

    '''
    follower_model = context['model'].UserFollowingUser
    return _followee_count(context, data_dict, follower_model)
def dataset_followee_count(context, data_dict):
    '''Return the number of datasets that are followed by the given user.

    :param id: the id of the user
    :type id: string

    :rtype: int

    '''
    follower_model = context['model'].UserFollowingDataset
    return _followee_count(context, data_dict, follower_model)
def group_followee_count(context, data_dict):
    '''Return the number of groups that are followed by the given user.

    :param id: the id of the user
    :type id: string

    :rtype: int

    '''
    follower_model = context['model'].UserFollowingGroup
    return _followee_count(context, data_dict, follower_model)
@logic.validate(logic.schema.default_follow_user_schema)
def followee_list(context, data_dict):
    '''Return the list of objects that are followed by the given user.

    Returns all objects, of any type, that the given user is following
    (e.g. followed users, followed datasets, followed groups.. ).

    :param id: the id of the user
    :type id: string
    :param q: a query string to limit results by, only objects whose display
        name begins with the given string (case-insensitive) wil be returned
        (optional)
    :type q: string

    :rtype: list of dictionaries, each with keys 'type' (e.g. 'user',
        'dataset' or 'group'), 'display_name' (e.g. a user's display name,
        or a package's title) and 'dict' (e.g. a dict representing the
        followed user, package or group, the same as the dict that would be
        returned by user_show, package_show or group_show)

    '''
    _check_access('followee_list', context, data_dict)

    def display_name(followee):
        '''Return a display name for the given user, group or dataset dict.'''
        return (followee.get('display_name') or
                followee.get('fullname') or
                followee.get('title') or
                followee.get('name'))

    # Get the followed objects.
    # TODO: Catch exceptions raised by these *_followee_list() functions?
    # FIXME should we be changing the context like this it seems dangerous
    context['skip_validation'] = True
    context['ignore_auth'] = True
    followee_dicts = []
    for list_action, followee_type in (
            (user_followee_list, 'user'),
            (dataset_followee_list, 'dataset'),
            (group_followee_list, 'group')):
        for followee in list_action(context, data_dict):
            followee_dicts.append({
                'type': followee_type,
                'display_name': display_name(followee),
                'dict': followee,
            })
    followee_dicts.sort(key=lambda d: d['display_name'])
    q = data_dict.get('q')
    if q:
        # Keep only followees whose display name starts with the query.
        q = q.strip().lower()
        followee_dicts = [
            d for d in followee_dicts
            if d['display_name'].strip().lower().startswith(q)]
    return followee_dicts
def user_followee_list(context, data_dict):
    '''Return the list of users that are followed by the given user.

    :param id: the id of the user
    :type id: string

    :rtype: list of dictionaries

    '''
    _check_access('user_followee_list', context, data_dict)
    if not context.get('skip_validation'):
        schema = context.get('schema') or (
            ckan.logic.schema.default_follow_user_schema())
        data_dict, errors = _validate(data_dict, schema, context)
        if errors:
            raise ValidationError(errors)
    model = context['model']
    followees = model.UserFollowingUser.followee_list(
        _get_or_bust(data_dict, 'id'))
    # Resolve each Follower record to a User object, skipping deleted users,
    # then dictize the result.
    users = [u for u in
             (model.User.get(f.object_id) for f in followees)
             if u is not None]
    return model_dictize.user_list_dictize(users, context)
def dataset_followee_list(context, data_dict):
    '''Return the list of datasets that are followed by the given user.

    :param id: the id or name of the user
    :type id: string

    :rtype: list of dictionaries

    '''
    _check_access('dataset_followee_list', context, data_dict)
    if not context.get('skip_validation'):
        schema = context.get('schema') or (
            ckan.logic.schema.default_follow_user_schema())
        data_dict, errors = _validate(data_dict, schema, context)
        if errors:
            raise ValidationError(errors)
    model = context['model']
    followees = model.UserFollowingDataset.followee_list(
        _get_or_bust(data_dict, 'id'))
    # Resolve each Follower record to a Package object, skipping missing
    # packages, then dictize the result.
    datasets = [pkg for pkg in
                (model.Package.get(f.object_id) for f in followees)
                if pkg is not None]
    return [model_dictize.package_dictize(pkg, context) for pkg in datasets]
def group_followee_list(context, data_dict):
    '''Return the list of groups that are followed by the given user.

    :param id: the id or name of the user
    :type id: string

    :rtype: list of dictionaries

    '''
    _check_access('group_followee_list', context, data_dict)
    if not context.get('skip_validation'):
        schema = context.get('schema',
                             ckan.logic.schema.default_follow_user_schema())
        data_dict, errors = _validate(data_dict, schema, context)
        if errors:
            raise ValidationError(errors)
    model = context['model']
    followees = model.UserFollowingGroup.followee_list(
        _get_or_bust(data_dict, 'id'))
    # Resolve each UserFollowingGroup record to a Group object, skipping
    # missing groups, then dictize the result.
    groups = [grp for grp in
              (model.Group.get(f.object_id) for f in followees)
              if grp is not None]
    return [model_dictize.group_dictize(grp, context) for grp in groups]
@logic.validate(logic.schema.default_pagination_schema)
def dashboard_activity_list(context, data_dict):
    '''Return the authorized user's dashboard activity stream.

    Unlike the activity dictionaries returned by other ``*_activity_list``
    actions, these activity dictionaries have an extra boolean value with key
    ``is_new`` that tells you whether the activity happened since the user last
    viewed her dashboard (``'is_new': True``) or not (``'is_new': False``).

    The user's own activities are always marked ``'is_new': False``.

    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ``ckan.activity_list_limit`` setting)

    :rtype: list of activity dictionaries

    '''
    _check_access('dashboard_activity_list', context, data_dict)
    model = context['model']
    user_id = model.User.get(context['user']).id
    offset = data_dict.get('offset', 0)
    limit = int(
        data_dict.get('limit', config.get('ckan.activity_list_limit', 31)))
    # FIXME: Filter out activities whose subject or object the user is not
    # authorized to read.
    activity_objects = model.activity.dashboard_activity_list(user_id,
        limit=limit, offset=offset)
    activity_dicts = model_dictize.activity_list_dictize(
        activity_objects, context)
    # Flag activities the user has not seen since the last dashboard view.
    last_viewed = model.Dashboard.get(user_id).activity_stream_last_viewed
    timestamp_format = '%Y-%m-%dT%H:%M:%S.%f'
    for activity in activity_dicts:
        if activity['user_id'] == user_id:
            # Never mark the user's own activities as new.
            activity['is_new'] = False
        else:
            occurred_at = datetime.datetime.strptime(
                activity['timestamp'], timestamp_format)
            activity['is_new'] = occurred_at > last_viewed
    return activity_dicts
@logic.validate(ckan.logic.schema.default_pagination_schema)
def dashboard_activity_list_html(context, data_dict):
    '''Return the authorized user's dashboard activity stream as HTML.

    The activity stream is rendered as a snippet of HTML meant to be included
    in an HTML page, i.e. it doesn't have any HTML header or footer.

    :param offset: where to start getting activity items from
        (optional, default: 0)
    :type offset: int
    :param limit: the maximum number of activities to return
        (optional, default: 31, the default value is configurable via the
        ckan.activity_list_limit setting)
    :type limit: int

    :rtype: string

    '''
    activity_stream = dashboard_activity_list(context, data_dict)
    # (Removed an unused ``model = context['model']`` assignment.)
    offset = data_dict.get('offset', 0)
    extra_vars = {
        'controller': 'user',
        'action': 'dashboard',
        'offset': offset,
    }
    return activity_streams.activity_list_to_html(context, activity_stream,
                                                  extra_vars)
def dashboard_new_activities_count(context, data_dict):
    '''Return the number of new activities in the user's dashboard.

    Return the number of new activities in the authorized user's dashboard
    activity stream.

    Activities from the user herself are not counted by this function even
    though they appear in the dashboard (users don't want to be notified about
    things they did themselves).

    :rtype: int

    '''
    _check_access('dashboard_new_activities_count', context, data_dict)
    activities = logic.get_action('dashboard_activity_list')(
        context, data_dict)
    # dashboard_activity_list marks each activity with an 'is_new' flag.
    return sum(1 for activity in activities if activity['is_new'])
def _unpick_search(sort, allowed_fields=None, total=None):
''' This is a helper function that takes a sort string
eg 'name asc, last_modified desc' and returns a list of
split field order eg [('name', 'asc'), ('last_modified', 'desc')]
allowed_fields can limit which field names are ok.
total controls how many sorts can be specifed '''
sorts = []
split_sort = sort.split(',')
for part in split_sort:
split_part = part.strip().split()
field = split_part[0]
if len(split_part) > 1:
order = split_part[1].lower()
else:
order = 'asc'
if allowed_fields:
if field not in allowed_fields:
raise ValidationError('Cannot sort by field `%s`' % field)
if order not in ['asc', 'desc']:
raise ValidationError('Invalid sort direction `%s`' % order)
sorts.append((field, order))
if total and len(sorts) > total:
raise ValidationError(
'Too many sort criteria provided only %s allowed' % total)
return sorts
def member_roles_list(context, data_dict):
    '''Return the possible roles for members of groups and organizations.

    :param group_type: the group type, either "group" or "organization"
        (optional, default "organization")
    :type group_type: string

    :returns: a list of dictionaries each with two keys: "text" (the display
        name of the role, e.g. "Admin") and "value" (the internal name of the
        role, e.g. "admin")
    :rtype: list of dictionaries

    '''
    # Check authorization before doing any work; previously this check ran
    # after the role list was already computed.
    _check_access('member_roles_list', context, data_dict)
    group_type = data_dict.get('group_type', 'organization')
    roles_list = new_authz.roles_list()
    if group_type == 'group':
        # The 'editor' role only applies to organizations, not groups.
        roles_list = [role for role in roles_list
                      if role['value'] != 'editor']
    return roles_list
| {
"repo_name": "WilJoey/tn_ckan",
"path": "ckan/logic/action/get.py",
"copies": "1",
"size": "100246",
"license": "mit",
"hash": -1072922379604111500,
"line_mean": 32.5945040214,
"line_max": 95,
"alpha_frac": 0.6365441015,
"autogenerated": false,
"ratio": 3.9170834635823697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016036943979623806,
"num_lines": 2984
} |
"""API functions to update heartbeats.
This module exists to keep external code decoupled from our model-based
implementation. This allows extending the functionality later to support
different mechanisms for tracking heartbeats (e.g. external services).
"""
from functools import wraps
from django.utils.timezone import now
from django_healthchecks.models import HeartbeatMonitor
def get_expired_heartbeats():
    """Provide a list of all heartbeats that expired.

    :rtype: list
    """
    enabled_monitors = HeartbeatMonitor.objects.enabled()
    return enabled_monitors.expired_names()
def get_heartbeat_statuses():
    """Provide a dict of ``name: status`` for every heartbeat.

    The special key ``__all__`` aggregates all statuses (True only when
    every enabled heartbeat is healthy).

    :rtype: dict
    """
    statuses = HeartbeatMonitor.objects.enabled().status_by_name()
    statuses["__all__"] = all(statuses.values())
    return statuses
def update_heartbeat(name, default_timeout=None, timeout=None):
    """Update a heartbeat monitor.

    This tracks a new pulse, so the timer is reset.

    Upon the first call, the ``default_timeout`` can be assigned.
    To tune the timeout later, use the Django admin interface,
    or make a call that provides the ``timeout`` value.

    :param name: Name of the check.
    :type name: str
    :param default_timeout: The timeout to use by default on registration.
    :type default_timeout: datetime.timedelta
    :param timeout: The timeout to be forcefully updated.
    :type timeout: datetime.timedelta
    """
    HeartbeatMonitor._update(name=name,
                             default_timeout=default_timeout,
                             timeout=timeout)
def update_heartbeat_on_success(name, default_timeout=None, timeout=None):
    """Decorator to update a heartbeat when a function was successful.

    The heartbeat is only updated after the wrapped function returns
    without raising.

    Usage:

    .. code-block::

        @update_heartbeat_on_success("some.check.name")
        def your_function():
            pass
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            update_heartbeat(
                name, default_timeout=default_timeout, timeout=timeout)
            return result
        return wrapper
    return decorator
| {
"repo_name": "mvantellingen/django-healthchecks",
"path": "src/django_healthchecks/heartbeats.py",
"copies": "1",
"size": "2133",
"license": "mit",
"hash": -6579686351624049000,
"line_mean": 28.2191780822,
"line_max": 84,
"alpha_frac": 0.6877637131,
"autogenerated": false,
"ratio": 4.141747572815534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5329511285915534,
"avg_score": null,
"num_lines": null
} |
"""ApiGatewayLambdaSetupFunction
Performs necessary API Gateway / Lambda setup.
* Adds Lambda execution permissions to API Gateway endpoints
* Adds necessary stage variables so function name mappings (from Swagger
for CORS) will work
* Adds necessary stage variables so necessary CORS headers can be resolved
by each function
"""
from __future__ import print_function
import json
import uuid
import re
import hashlib
import boto3
import botocore
import cfnresponse
# Shared AWS service clients, created once per Lambda execution environment
# so they are reused across warm invocations.
apig_client = boto3.client('apigateway')
cloudformation_client = boto3.client('cloudformation')
lambda_client = boto3.client('lambda')
s3_client = boto3.client('s3')

# Maps each CORS response header we care about to the stage-variable name
# that exposes its per-resource value.
cors_headers_stage_variable_map = {
    "Access-Control-Allow-Methods": "CorsMethodsByResource",
    "Access-Control-Allow-Headers": "CorsHeadersByResource"
}
# The headers to extract from each OPTIONS method's integration response.
cors_headers_to_save = cors_headers_stage_variable_map.keys()
# S3 key under which the per-resource CORS header map is published.
cors_json_s3_key = "api-cors.json"
def lambda_handler(event, context):
    """Handle an API Gateway / Lambda setup CloudFormation custom resource.

    On Create/Update: grants API Gateway permission to invoke each
    Lambda-backed endpoint, sets stage variables mapping logical function
    names to physical ones, patches CORS origin headers on MOCK/AWS
    integrations, publishes the collected CORS headers to S3, and redeploys
    the stage when integration responses were changed. On Delete: removes
    the published CORS object from S3. Always signals the outcome back to
    CloudFormation via cfnresponse.
    """
    print('Event: {}'.format(json.dumps(event)))
    stack_id = event["StackId"]
    stack_region = stack_id.split(":")[3]
    stack_account_id = stack_id.split(":")[4]
    request_type = event.get("RequestType")
    resource_props = event["ResourceProperties"]
    physical_resource_id = event.get("PhysicalResourceId")
    # Data payload returned to CloudFormation. BUG FIX: this used to be
    # clobbered inside the integration-response loop below (which reused the
    # name ``response_dict``), leaking an arbitrary API Gateway integration
    # response dict to cfnresponse.send; the loop variable is now distinct.
    response_dict = {}
    bucket_name = resource_props["Bucket"]
    if request_type == "Delete":
        s3_client.delete_object(
            Bucket = bucket_name,
            Key = cors_json_s3_key
        )
    if request_type in ["Create", "Update"]:
        rest_api_id = resource_props["RestApi"]
        stage_name = resource_props["StageName"]
        cors_origin_list = resource_props.get("CorsOriginList", "")
        stage_redeploy_required = False
        # Collect every resource in the REST API (the listing is paginated).
        paginator = apig_client.get_paginator("get_resources")
        response_iterator = paginator.paginate(
            restApiId = rest_api_id
        )
        resource_list = []
        for each_response in response_iterator:
            resource_list.extend(each_response["items"])
        lambda_function_resource_map = {}
        resource_cors_map = {}
        for each_resource in resource_list:
            for each_method in each_resource.get("resourceMethods", {}).keys():
                response = apig_client.get_method(
                    restApiId = rest_api_id,
                    resourceId = each_resource["id"],
                    httpMethod = each_method
                )
                method_integration = response.get("methodIntegration", {})
                if ":lambda:" in method_integration.get("uri", ""):
                    # Lambda-backed endpoint. The function name is expressed
                    # as a stage variable, e.g. ${stageVariables.MyFunction};
                    # extract the logical resource name(s) referenced.
                    integration_uri = method_integration["uri"]
                    lambda_arn = ":".join(integration_uri.split(":")[6:]).split("/")[0]
                    lambda_function_name = lambda_arn.split(":")[5]
                    m = re.search(r"\${stageVariables\.([^}]+)}", lambda_function_name)
                    for each_lambda_function_resource_name in m.groups():
                        if each_lambda_function_resource_name not in lambda_function_resource_map:
                            lambda_function_resource_map[each_lambda_function_resource_name] = []
                        lambda_function_resource_map[each_lambda_function_resource_name].append({
                            "resource": each_resource["path"],
                            "method": each_method
                        })
                if method_integration.get("type", "").upper() in ["MOCK", "AWS"]:
                    # Static endpoint or AWS service proxy.
                    integration_response_keys = response["methodIntegration"]["integrationResponses"].keys()
                    for each_integration_response_key in integration_response_keys:
                        # Renamed from ``response_dict`` so the CloudFormation
                        # response payload above is not overwritten.
                        each_integration_response = response["methodIntegration"]["integrationResponses"][each_integration_response_key]
                        response_parameters = each_integration_response.get("responseParameters", {})
                        cors_origin_mapping_key = "method.response.header.Access-Control-Allow-Origin"
                        if response_parameters.get(cors_origin_mapping_key, "") != cors_origin_list:
                            print("Updating {} {} {} integration response parameter {}".format(
                                each_resource["path"],
                                each_method,
                                each_integration_response_key,
                                cors_origin_mapping_key
                            ))
                            apig_client.update_integration_response(
                                restApiId = rest_api_id,
                                resourceId = each_resource["id"],
                                httpMethod = each_method,
                                statusCode = each_integration_response_key,
                                patchOperations = [{
                                    "op": "replace",
                                    "path": "/responseParameters/{}".format(cors_origin_mapping_key),
                                    "value": "'{}'".format(cors_origin_list)
                                }]
                            )
                            stage_redeploy_required = True
                        if each_method.upper() == "OPTIONS":
                            # Record the CORS headers declared on the OPTIONS
                            # preflight response so they can be published.
                            headers_dict = {}
                            for each_key in response_parameters.keys():
                                if each_key.startswith("method.response.header."):
                                    header_name = each_key[23:].upper()
                                    headers_dict[header_name] = response_parameters[each_key]
                            resource_cors_map[each_resource["path"]] = {}
                            for each_header in cors_headers_to_save:
                                each_header_upper = each_header.upper()
                                if each_header_upper in headers_dict:
                                    each_header_value = headers_dict[each_header_upper]
                                    # Strip single quotes off.
                                    each_header_value = each_header_value[1:][:-1]
                                    resource_cors_map[each_resource["path"]][each_header] = each_header_value
        lambda_function_resource_name_map = {}
        for each_lambda_function_resource_name in lambda_function_resource_map.keys():
            invocations_needing_access = lambda_function_resource_map[each_lambda_function_resource_name]
            # Resolve the logical resource name to the physical function name.
            response = cloudformation_client.describe_stack_resource(
                StackName = stack_id,
                LogicalResourceId = each_lambda_function_resource_name
            )
            lambda_function_name = response["StackResourceDetail"]["PhysicalResourceId"]
            lambda_function_resource_name_map[each_lambda_function_resource_name] = lambda_function_name
            for each_invocation_dict in invocations_needing_access:
                # Deterministic statement id so re-runs target the same
                # permission statement.
                # NOTE(review): hashlib.md5 requires bytes on Python 3, so
                # this json.dumps(...) call only works on Python 2 as
                # written -- confirm the intended runtime.
                statement_id = "apigateway-{}".format(
                    hashlib.md5(json.dumps(each_invocation_dict, sort_keys=True)).hexdigest()
                )
                source_arn = "arn:aws:execute-api:{aws_region}:{aws_account_id}:{api_id}/*/{http_method}{http_path}".format(
                    aws_region = stack_region,
                    aws_account_id = stack_account_id,
                    api_id = rest_api_id,
                    http_method = each_invocation_dict["method"].upper(),
                    http_path = each_invocation_dict["resource"]
                )
                print("Adding permission to execute {} to {}".format(
                    lambda_function_name,
                    source_arn
                ))
                try:
                    lambda_client.add_permission(
                        FunctionName = lambda_function_name,
                        StatementId = statement_id,
                        Action = "lambda:InvokeFunction",
                        Principal = "apigateway.amazonaws.com",
                        SourceArn = source_arn
                    )
                except botocore.exceptions.ClientError as e:
                    # An identical permission left over from a previous run
                    # is fine; anything else is a genuine failure.
                    if e.response['Error']['Code'] == 'ResourceConflictException':
                        print("Already exists.")
                    else:
                        raise
        stage_patch_operations = []
        for each_lambda_function_resource_name in lambda_function_resource_name_map.keys():
            each_lambda_function_name = lambda_function_resource_name_map[each_lambda_function_resource_name]
            stage_patch_operations.append({
                "op": "replace",
                "path": "/variables/{}".format(each_lambda_function_resource_name),
                "value": each_lambda_function_name
            })
        if len(stage_patch_operations) > 0:
            print("Creating stage variables ({}) for Lambda function names.".format(len(stage_patch_operations)))
            apig_client.update_stage(
                restApiId = rest_api_id,
                stageName = stage_name,
                patchOperations = stage_patch_operations
            )
        print("Posting CORS header values to S3.")
        s3_client.put_object(
            Bucket = bucket_name,
            Key = cors_json_s3_key,
            Body = json.dumps(resource_cors_map, indent=4),
            ContentType = "application/json"
        )
        additional_stage_variables = resource_props.get("StageVariables", {})
        stage_patch_operations = []
        for each_key in additional_stage_variables.keys():
            each_value = additional_stage_variables[each_key]
            stage_patch_operations.append({
                "op": "replace",
                "path": "/variables/{}".format(each_key),
                "value": each_value
            })
        if len(stage_patch_operations) > 0:
            print("Creating additional stage variables ({}).".format(len(stage_patch_operations)))
            apig_client.update_stage(
                restApiId = rest_api_id,
                stageName = stage_name,
                patchOperations = stage_patch_operations
            )
        if stage_redeploy_required:
            print("Redeploying stage.")
            apig_client.create_deployment(
                restApiId = rest_api_id,
                stageName = stage_name
            )
    cfnresponse.send(event, context, cfnresponse.SUCCESS, response_dict, physical_resource_id)
return {} | {
"repo_name": "moduspwnens/boa-chat",
"path": "boa-nimbus/lambda/ApiGatewayLambdaSetupFunction/index.py",
"copies": "1",
"size": "11319",
"license": "mit",
"hash": 4530461941589525500,
"line_mean": 41.3970037453,
"line_max": 124,
"alpha_frac": 0.5018111141,
"autogenerated": false,
"ratio": 4.971014492753623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5972825606853623,
"avg_score": null,
"num_lines": null
} |
import string
import inspect
import os
import sys
import pkgutil
import shutil
def _obj_name(obj):
if hasattr(obj, '__name__'):
return obj.__name__
def docstring_to_markdown(docstring):
    """Convert a Python object's docstring to markdown

    Parameters
    ----------
    docstring : str
        The docstring body.

    Returns
    ----------
    clean_lst : list
        The markdown formatted docstring as lines (str) in a Python list.

    """
    new_docstring_lst = []
    encountered_examples = False
    # First pass: bold section headers (lines underlined with --- or ===)
    # and open a fenced code block before the first doctest example line.
    for idx, line in enumerate(docstring.split('\n')):
        line = line.strip()
        if set(line) in ({'-'}, {'='}):
            # Underline row found: the previous appended line is a header.
            new_docstring_lst[idx-1] = '**%s**' % new_docstring_lst[idx-1]
        elif line.startswith('>>>'):
            if not encountered_examples:
                new_docstring_lst.append('```')
                encountered_examples = True
        new_docstring_lst.append(line)
    # Second pass: turn "name : type" parameter lines into markdown list
    # items and re-indent description/continuation lines.
    # NOTE(review): this pass writes through idx+1 while enumerating a slice
    # of the same list; once a '```' line has been inserted by the first
    # pass, indices are offset by one -- confirm this alignment is intended.
    for idx, line in enumerate(new_docstring_lst[1:]):
        if line:
            if line.startswith('Description : '):
                new_docstring_lst[idx+1] = (new_docstring_lst[idx+1]
                                            .replace('Description : ', ''))
            elif ' : ' in line:
                line = line.replace(' : ', '` : ')
                new_docstring_lst[idx+1] = '\n- `%s\n' % line
            elif '**' in new_docstring_lst[idx-1] and '**' not in line:
                new_docstring_lst[idx+1] = '\n%s' % line.lstrip()
            elif '**' not in line:
                new_docstring_lst[idx+1] = ' %s' % line.lstrip()
    # Final pass: drop the raw underline rows; close the code fence if one
    # was opened for examples.
    clean_lst = []
    for line in new_docstring_lst:
        if set(line.strip()) not in ({'-'}, {'='}):
            clean_lst.append(line)
    if encountered_examples:
        clean_lst.append('```')
    return clean_lst
def object_to_markdownpage(obj_name, obj, s=''):
    """Generate the markdown documentation of a Python object.

    Parameters
    ----------
    obj_name : str
        Name of the Python object.
    obj : object
        Python object (class, method, function, ...)
    s : str (default: '')
        A string to which the documentation will be appended to.

    Returns
    ---------
    s : str
        The markdown page.

    """
    # header
    s += '## %s\n' % obj_name
    # function/class/method signature (strip the bound ``self`` parameter)
    sig = str(inspect.signature(obj)).replace('(self, ', '(')
    s += '\n*%s%s*\n\n' % (obj_name, sig)
    # docstring body
    doc = str(inspect.getdoc(obj))
    ds = docstring_to_markdown(doc)
    s += '\n'.join(ds)
    # for classes, document public methods and properties as well
    if inspect.isclass(obj):
        methods, properties = '\n\n### Methods', '\n\n### Properties'
        members = inspect.getmembers(obj)
        for m in members:
            if not m[0].startswith('_') and len(m) >= 2:
                if isinstance(m[1], property):
                    properties += '\n\n<hr>\n\n*%s*\n\n' % m[0]
                    m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
                    properties += '\n'.join(m_doc)
                else:
                    sig = str(inspect.signature(m[1]))
                    # Strip ``self`` from method signatures. (A second,
                    # redundant ``replace('(self)', '()')`` statement that
                    # used to follow has been removed.)
                    sig = sig.replace('(self, ', '(').replace('(self)', '()')
                    methods += '\n\n<hr>\n\n*%s%s*\n\n' % (m[0], sig)
                    m_doc = docstring_to_markdown(str(inspect.getdoc(m[1])))
                    methods += '\n'.join(m_doc)
        s += methods
        s += properties
    return s + '\n\n'
def import_package(rel_path_to_package, package_name):
    """Imports a python package into the current namespace.

    Parameters
    ----------
    rel_path_to_package : str
        Path to the package containing director relative from this script's
        directory.
    package_name : str
        The name of the package to be imported.

    Returns
    ---------
    package : The imported package object.

    """
    try:
        base_dir = os.path.dirname(os.path.realpath(__file__))
    except NameError:
        # No __file__ (e.g. interactive session): fall back to the cwd.
        base_dir = os.path.dirname(os.path.realpath(os.getcwd()))
    search_path = os.path.join(base_dir, rel_path_to_package)
    # Prepend so this location shadows any installed package of same name.
    if search_path not in sys.path:
        sys.path = [search_path] + sys.path
    return __import__(package_name)
def get_subpackages(package):
    """Return subpackages of a package.

    Parameters
    ----------
    package : python package object

    Returns
    --------
    list : list containing (importer, subpackage_name) tuples

    """
    # iter_modules yields (module_finder, name, ispkg); keep packages only.
    return [entry for entry in pkgutil.iter_modules(package.__path__)
            if entry[2]]
def get_modules(package):
    """Return modules of a package.

    Parameters
    ----------
    package : python package object

    Returns
    --------
    list : list of (module_finder, name, ispkg) tuples for every module
        and subpackage directly inside `package`.
    """
    return list(pkgutil.iter_modules(package.__path__))
def get_functions_and_classes(package):
    """Return lists of classes and functions from a package.

    Parameters
    ----------
    package : python package object

    Returns
    --------
    list, list : list of classes and functions.
        Each sublist consists of [name, member] sublists.
    """
    classes, functions = [], []
    for name, member in inspect.getmembers(package):
        # skip private and dunder members
        if name.startswith('_'):
            continue
        if inspect.isclass(member):
            classes.append([name, member])
        elif inspect.isfunction(member):
            functions.append([name, member])
    return classes, functions
def generate_api_docs(package, api_dir, clean=False, printlog=True):
    """Generate a module level API documentation of a python package.

    Description
    -----------
    Generates markdown API files for each module in a Python package whereas
    the structure is as follows:
    `package/package.subpackage/package.subpackage.module.md`

    Parameters
    -----------
    package : Python package object
    api_dir : str
        Output directory path for the top-level package directory
    clean : bool (default: False)
        Removes previously existing API directory if True.
    printlog : bool (default: True)
        Prints a progress log to the standard output screen if True.
    """
    if printlog:
        print('\n\nGenerating Module Files\n%s\n' % (50 * '='))
    prefix = package.__name__ + "."
    # clear the previous version of the generated API directory
    if clean:
        if os.path.isdir(api_dir):
            shutil.rmtree(api_dir)
    # walk immediate children of the package, keeping only subpackages
    api_docs = {}
    for importer, pkg_name, is_pkg in pkgutil.iter_modules(
            package.__path__,
            prefix):
        if is_pkg:
            # fromlist forces __import__ to return the subpackage itself
            subpackage = __import__(pkg_name, fromlist="dummy")
            # NOTE(review): the iterator above already captured `prefix`,
            # so this reassignment has no effect on the loop -- looks like
            # leftover code; confirm before removing.
            prefix = subpackage.__name__ + "."
            # get public functions and classes of the subpackage
            classes, functions = get_functions_and_classes(subpackage)
            target_dir = os.path.join(api_dir, subpackage.__name__)
            # create the subdirectory for this subpackage on first use
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)
                if printlog:
                    print('created %s' % target_dir)
            # build markdown documents in memory; append when several
            # objects map to the same output file path
            for obj in classes + functions:
                md_path = os.path.join(target_dir, obj[0]) + '.md'
                if md_path not in api_docs:
                    api_docs[md_path] = object_to_markdownpage(obj_name=obj[0],
                                                               obj=obj[1],
                                                               s='')
                else:
                    api_docs[md_path] += object_to_markdownpage(obj_name=(
                        obj[0]),
                        obj=obj[1],
                        s='')
    # write to files, skipping any file whose content is unchanged
    for d in sorted(api_docs):
        prev = ''
        if os.path.isfile(d):
            with open(d, 'r') as f:
                prev = f.read()
            if prev == api_docs[d]:
                msg = 'skipped'
            else:
                msg = 'updated'
        else:
            msg = 'created'
        if msg != 'skipped':
            with open(d, 'w') as f:
                f.write(api_docs[d])
        if printlog:
            print('%s %s' % (msg, d))
def summarize_methdods_and_functions(api_modules, out_dir,
                                     printlog=False, clean=True,
                                     str_above_header=''):
    """Generate subpackage-level summary files.

    Description
    -----------
    A function to generate subpackage-level summary markdown API files from
    a module-level API documentation previously created via the
    `generate_api_docs` function.
    The output structure is:
    package/package.subpackage.md

    Parameters
    ----------
    api_modules : str
        Path to the API documentation created via `generate_api_docs`
    out_dir : str
        Path to the desired output directory for the new markdown files.
    printlog : bool (default: False)
        Prints a progress log to the standard output screen if True.
    clean : bool (default: True)
        Removes previously existing API directory if True.
    str_above_header : str (default: '')
        Places a string just above the header.
    """
    if printlog:
        print('\n\nGenerating Subpackage Files\n%s\n' % (50 * '='))
    # remove any previous output so stale files do not linger
    if clean:
        if os.path.isdir(out_dir):
            shutil.rmtree(out_dir)
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
        if printlog:
            print('created %s' % out_dir)
    # one output file per subpackage directory, named after the directory
    subdir_paths = [os.path.join(api_modules, d)
                    for d in os.listdir(api_modules)
                    if not d.startswith('.')]
    out_files = [os.path.join(out_dir, os.path.basename(d)) + '.md'
                 for d in subdir_paths]
    for sub_p, out_f in zip(subdir_paths, out_files):
        module_paths = (os.path.join(sub_p, m)
                        for m in os.listdir(sub_p)
                        if not m.startswith('.'))
        # concatenate all module-level pages of this subpackage
        new_output = []
        if str_above_header:
            new_output.append(str_above_header)
        for p in module_paths:
            with open(p, 'r') as r:
                new_output.extend(r.readlines())
        msg = ''
        if not os.path.isfile(out_f):
            msg = 'created'
        if msg != 'created':
            # compare against the previous content to avoid needless rewrites
            # NOTE(review): if `str_above_header` lacks a trailing newline,
            # readlines() merges it with the next line and this comparison
            # always reports 'updated' -- confirm intended.
            with open(out_f, 'r') as f:
                prev = f.readlines()
            if prev != new_output:
                msg = 'updated'
            else:
                msg = 'skipped'
        if msg != 'skipped':
            with open(out_f, 'w') as f:
                f.write(''.join(new_output))
        if printlog:
            print('%s %s' % (msg, out_f))
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser(
        description='Convert docstring into a markdown API documentation.',
        formatter_class=argparse.RawTextHelpFormatter)
    cli.add_argument('-n', '--package_name',
                     default='biopandas',
                     help='Name of the package')
    cli.add_argument('-d', '--package_dir',
                     default='../../biopandas/',
                     help="Path to the package's enclosing directory")
    cli.add_argument('-o1', '--output_module_api',
                     default='../docs/sources/api_modules',
                     help=('Target directory for the module-level'
                           ' API Markdown files'))
    cli.add_argument('-o2', '--output_subpackage_api',
                     default='../docs/sources/api_subpackages',
                     help=('Target directory for the'
                           'subpackage-level API Markdown files'))
    cli.add_argument('-c', '--clean',
                     action='store_true',
                     help='Remove previous API files')
    cli.add_argument('-s', '--silent',
                     action='store_true',
                     help='Suppress log printed to the screen')
    cli.add_argument('-v', '--version',
                     action='version',
                     version='v. 0.1')
    args = cli.parse_args()

    # Import the target package, build the module-level docs, then roll
    # them up into subpackage-level summary pages.
    package = import_package(args.package_dir, args.package_name)
    verbose = not args.silent
    generate_api_docs(package=package,
                      api_dir=args.output_module_api,
                      clean=args.clean,
                      printlog=verbose)
    header = 'biopandas version: %s\n' % (package.__version__)
    summarize_methdods_and_functions(api_modules=args.output_module_api,
                                     out_dir=args.output_subpackage_api,
                                     printlog=verbose,
                                     clean=args.clean,
                                     str_above_header=header)
| {
"repo_name": "rasbt/biopandas",
"path": "docs/make_api.py",
"copies": "1",
"size": "13363",
"license": "bsd-3-clause",
"hash": -3116583341007039500,
"line_mean": 31.4344660194,
"line_max": 79,
"alpha_frac": 0.513058445,
"autogenerated": false,
"ratio": 4.269329073482428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5282387518482428,
"avg_score": null,
"num_lines": null
} |
"""API Handler Class"""
# standard library
import gzip
import os
import shutil
from logging.handlers import RotatingFileHandler
from typing import Optional
class RotatingFileHandlerCustom(RotatingFileHandler):
    """Logger handler for ThreatConnect Exchange File logging.

    Extends RotatingFileHandler to create the full log directory path when
    it is missing and to gzip-compress rotated log files.
    """

    def __init__(
        self,
        filename: str,
        mode: str = 'a',
        maxBytes: int = 0,
        backupCount: int = 0,
        encoding: Optional[str] = None,
        delay: bool = False,
    ):
        """Customize RotatingFileHandler to create full log path.

        Args:
            filename: The name of the logfile.
            mode: The write mode for the file.
            maxBytes: The max file size before rotating.
            backupCount: The maximum # of backup files.
            encoding: The log file encoding.
            delay: If True, then file opening is deferred until the first call to emit().
        """
        # default to UTF-8 when no locale is configured in the environment
        if encoding is None and os.getenv('LANG') is None:
            encoding = 'UTF-8'
        # Create the log directory if needed. Guard against a bare filename,
        # for which dirname() returns '' and makedirs('') would raise.
        log_dir = os.path.dirname(filename)
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir, exist_ok=True)
        super().__init__(filename, mode, maxBytes, backupCount, encoding, delay)

        # compress rotated logs and give them a .gz suffix
        self.namer = self.custom_gzip_namer
        self.rotator = self.custom_gzip_rotator

    @staticmethod
    def custom_gzip_namer(name):
        """Namer for rotating log handler with gz extension.

        Args:
            name: The current name of the logfile.
        """
        return name + '.gz'

    @staticmethod
    def custom_gzip_rotator(source: str, dest: str) -> None:
        """Rotate and compress log file.

        Args:
            source: The source filename.
            dest: The destination filename.
        """
        with open(source, 'rb') as f_in:
            with gzip.open(dest, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove(source)
# class RotatingFileHandlerFormatter(logging.Formatter):
# """Custom logging formatter that allows a different format depending on the logging level."""
#
# def __init__(self):
# """Initialize formatter parent."""
# super().__init__(fmt='%(levelno)d: %(msg)s', datefmt=None, style='%')
#
# def format(self, record):
# """Format file handle log event according to logging level.
#
# Args:
# record (obj): The record to be logged.
# """
# # Replace the original format with one customized by logging level
# self._style._fmt = self.standard_format
# if record.levelno < 10: # <= logging.DEBUG
# self._style._fmt = self.trace_format
#
# # Call the original formatter class to do the grunt work
# result = logging.Formatter.format(self, record)
#
# return result
#
# @property
# def standard_format(self):
# """Return the standard log format"""
# return (
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s '
# '(%(filename)s:%(funcName)s:%(lineno)d:%(threadName)s)'
# )
#
# @property
# def trace_format(self):
# """Return the standard log format"""
# return (
# '%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s:%(lineno)d] %(message)s '
# '(%(filename)s:%(threadName)s)'
# )
| {
"repo_name": "kstilwell/tcex",
"path": "tcex/logger/rotating_file_handler_custom.py",
"copies": "2",
"size": "3432",
"license": "apache-2.0",
"hash": 1982358539146169300,
"line_mean": 32.6470588235,
"line_max": 99,
"alpha_frac": 0.5818764569,
"autogenerated": false,
"ratio": 3.953917050691244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5535793507591245,
"avg_score": null,
"num_lines": null
} |
"""API Handler Class"""
# standard library
import logging
import threading
import time
class ApiHandler(logging.Handler):
    """Logger handler for ThreatConnect Exchange API logging."""

    def __init__(self, session, flush_limit=100):
        """Initialize Class properties.

        Args:
            session (Request.Session): The preconfigured instance of Session for ThreatConnect API.
            flush_limit (int): The limit to flush batch logs to the API.
        """
        super().__init__()
        self.session = session
        self.flush_limit = flush_limit
        self._entries = []
        self.in_token_renewal = False

    def flush(self):
        """Send any queued log entries to the API."""
        self.log_to_api(self.entries)  # pragma: no cover

    def emit(self, record):
        """Emit a record.

        Args:
            record (obj): The record to be logged.
        """
        # only handle events raised on the main thread
        if threading.current_thread().name != 'MainThread':  # pragma: no cover
            return

        # queue the formatted log event
        self._entries.append(self.format(record))

        # flush once the queue exceeds the limit or on an ERROR event,
        # unless the token module is currently renewing the token
        over_limit = len(self._entries) > self.flush_limit
        is_error = record.levelname == 'ERROR'
        if (over_limit or is_error) and not self.in_token_renewal:
            self.log_to_api(self.entries)

    @property
    def entries(self):
        """Return the queued entries, clearing the queue."""
        queued, self._entries = self._entries, []
        return queued

    def log_to_api(self, entries):
        """Send log events to the ThreatConnect API"""
        if not entries:
            return
        try:
            self.session.post(
                '/v2/logs/app',
                headers={'Content-Type': 'application/json'},
                json=entries,
            )
        except Exception:  # nosec; pragma: no cover
            # best-effort delivery: never let API logging break the app
            pass
class ApiHandlerFormatter(logging.Formatter):
    """Logger formatter for ThreatConnect Exchange API logging."""

    def __init__(self):
        """Initialize Class properties."""
        super().__init__()

    def format(self, record):
        """Format log record for ThreatConnect API.

        Example log event::

            [{
                "timestamp": 1478907537000,
                "message": "Test Message",
                "level": "DEBUG"
            }]

        Args:
            record (obj): The record to be logged.
        """
        created = float(record.created or time.time())
        return {
            'timestamp': int(created * 1000),  # epoch milliseconds
            'message': record.msg or '',
            'level': record.levelname or 'DEBUG',
        }
| {
"repo_name": "kstilwell/tcex",
"path": "tcex/logger/api_handler.py",
"copies": "1",
"size": "2614",
"license": "apache-2.0",
"hash": -6659380453747026000,
"line_mean": 28.7045454545,
"line_max": 99,
"alpha_frac": 0.5623565417,
"autogenerated": false,
"ratio": 4.4683760683760685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5530732610076069,
"avg_score": null,
"num_lines": null
} |
"""API Handlers.
"""
# third-party imports
from google.appengine.api import mail
# local imports
from app.base.handlers import BaseAjaxHandler
from app import config
from app.forms.contacts import ContactForm
class ContactHandler(BaseAjaxHandler):
    """Handles the form submission from the frontend contact page.
    """

    def post(self):
        """Validate the form data, if successful send of the email
        returning a successful json response with the submitted data.
        Else return a failed json response with the form errors.
        """
        form = ContactForm(self.request.POST)
        if not form.validate():
            # validation failed: report the form errors back to the client
            self.response.set_status(400)
            return self.render_json({
                'status': 'fail',
                'data': form.errors,
            })

        # Render the user generated content using jinja2,
        # to enable auto-escaping
        template_data = {
            'title': 'Someone wants to get in touch.',
            'form': form
        }
        mail.send_mail(
            sender='contact@{}.appspotmail.com'.format(config.APP_ID),
            to=config.EMAIL_TO,
            subject='SharonReganArt: Contact',
            body=self.render_to_string('emails/form.txt', template_data),
            html=self.render_to_string('emails/form.html', template_data)
        )
        self.response.set_status(200)
        return self.render_json({'status': 'success'})
| {
"repo_name": "mjmcconnell/sra",
"path": "src-server/app/handlers/apis/mail.py",
"copies": "1",
"size": "1529",
"license": "apache-2.0",
"hash": 746518695700519300,
"line_mean": 32.2391304348,
"line_max": 77,
"alpha_frac": 0.5905820798,
"autogenerated": false,
"ratio": 4.356125356125356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5446707435925356,
"avg_score": null,
"num_lines": null
} |
"""API harvester for DataOne - for the SHARE project
Example query: https://cn.dataone.org/cn/v1/query/solr/?q=dateModified:[NOW-5DAY%20TO%20*]&rows=10
"""
from __future__ import unicode_literals
import logging
from datetime import timedelta, date
from lxml import etree
from functools import partial
from dateutil.parser import parse
from xml.etree import ElementTree
from nameparser import HumanName
from scrapi import requests
from scrapi import settings
from scrapi.base import helpers
from scrapi.base import XMLHarvester
from scrapi.util import copy_to_unicode
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import compose, single_result, build_properties, datetime_formatter
logger = logging.getLogger(__name__)
DEFAULT_ENCODING = 'UTF-8'
DATAONE_SOLR_ENDPOINT = 'https://cn.dataone.org/cn/v1/query/solr/'
def process_contributors(author, submitters, contributors,
                         investigators):
    """Build a list of contributor dicts from DataONE author fields.

    The author (if any), contributors, and investigators are merged and
    de-duplicated; the author entry additionally carries the submitter
    email when one is present.

    Args:
        author: The primary author (str or single-element list).
        submitters: Iterable of submitter identifiers; an entry containing
            '@' is treated as the author's email address.
        contributors: Contributor name(s) (str or list).
        investigators: Investigator name(s) (str or list).

    Returns:
        list: dicts with 'name', 'givenName', 'additionalName',
        'familyName', and (for the author, when available) 'email'.
    """
    if not author:
        author = ''
    elif isinstance(author, list):
        author = author[0]
    if not isinstance(contributors, list):
        contributors = [contributors]
    if not isinstance(investigators, list):
        investigators = [investigators]

    # De-duplicate while preserving input order; a plain set() made the
    # output order nondeterministic across runs.
    # NOTE(review): when author is missing, '' still flows through as a
    # contributor entry (matching the previous behavior) -- confirm whether
    # empty names should be filtered out instead.
    seen = set()
    unique_contributors = []
    for person in [author] + contributors + investigators:
        if person not in seen:
            seen.add(person)
            unique_contributors.append(person)

    if len(unique_contributors) < 1:
        return []

    # this is the index of the author in the unique_contributors list
    if author != '':
        author_index = unique_contributors.index(author)
    else:
        author_index = None

    # grab the (last) submitter entry that looks like an email address
    email = ''
    for submitter in submitters:
        if '@' in submitter:
            email = submitter

    contributor_list = []
    for index, contributor in enumerate(unique_contributors):
        name = HumanName(contributor)
        contributor_dict = {
            'name': contributor,
            'givenName': name.first,
            'additionalName': name.middle,
            'familyName': name.last,
        }
        # only the author entry carries the submitter email
        if author_index is not None and index == author_index and email:
            contributor_dict['email'] = email
        contributor_list.append(contributor_dict)
    return contributor_list
class DataOneHarvester(XMLHarvester):
    """Harvester for the DataONE coordinating-node SOLR search API.

    Fetches records modified within a date window and maps the SOLR
    fields onto the scrapi document schema below.
    """

    short_name = 'dataone'
    long_name = 'DataONE: Data Observation Network for Earth'
    url = 'https://www.dataone.org/'

    namespaces = {}
    record_encoding = None

    # Mapping of output document fields to XPath expressions (with optional
    # post-processing callables) evaluated against each SOLR <doc> element.
    schema = {
        'otherProperties': build_properties(
            ('authorGivenName', ("str[@name='authorGivenName']/node()")),
            ('authorSurName', ("str[@name='authorSurName']/node()")),
            ('authoritativeMN', ("str[@name='authoritativeMN']/node()")),
            ('checksum', ("str[@name='checksum']/node()")),
            ('checksumAlgorithm', ("str[@name='checksumAlgorithm']/node()")),
            ('datasource', ("str[@name='datasource']/node()")),
            ('datePublished', ("date[@name='datePublished']/node()")),
            ('dateUploaded', ("date[@name='dateUploaded']/node()")),
            ('pubDate', ("date[@name='pubDate']/node()")),
            ('updateDate', ("date[@name='updateDate']/node()")),
            ('fileID', ("str[@name='fileID']/node()")),
            ('formatId', ("str[@name='formatId']/node()")),
            ('formatType', ("str[@name='formatType']/node()")),
            ('identifier', ("str[@name='identifier']/node()")),
            ('readPermission', "arr[@name='readPermission']/str/node()"),
            ('replicaMN', "arr[@name='replicaMN']/str/node()"),
            ('replicaVerifiedDate', "arr[@name='replicaVerifiedDate']/date/node()"),
            ('replicationAllowed', ("bool[@name='replicationAllowed']/node()")),
            ('numberReplicas', ("int[@name='numberReplicas']/node()")),
            ('preferredReplicationMN', "arr[@name='preferredReplicationMN']/str/node()"),
            ('rightsHolder', ("str[@name='rightsHolder']/node()")),
            ('scientificName', "arr[@name='scientificName']/str/node()"),
            ('site', "arr[@name='site']/str/node()"),
            ('size', ("long[@name='size']/node()")),
            ('isDocumentedBy', "arr[@name='isDocumentedBy']/str/node()"),
            ('serviceID', "str[@name='id']/node()"),
            ('sku', "str[@name='sku']/node()")
        ),
        'freeToRead': {
            # start date is only set when the record is flagged public
            'startDate': ("bool[@name='isPublic']/node()", "date[@name='dateModified']/node()", lambda x, y: parse(y[0]).date().isoformat() if x else None)
        },
        'contributors': ("str[@name='author']/node()", "str[@name='submitter']/node()", "arr[@name='origin']/str/node()", "arr[@name='investigator']/str/node()", process_contributors),
        'uris': ("str[@name='id']/node()", "//str[@name='dataUrl']/node()", "arr[@name='resourceMap']/str/node()", partial(helpers.oai_process_uris, use_doi=True)),
        'tags': ("//arr[@name='keywords']/str/node()", lambda x: x if isinstance(x, list) else [x]),
        'providerUpdatedDateTime': ("str[@name='dateModified']/node()", compose(datetime_formatter, single_result)),
        'title': ("str[@name='title']/node()", single_result),
        'description': ("str[@name='abstract']/node()", single_result)
    }

    def harvest(self, start_date=None, end_date=None):
        """Return RawDocuments for records modified in the date window.

        Defaults to the last settings.DAYS_BACK days when no dates given.
        """
        start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
        end_date = end_date or date.today()

        records = self.get_records(start_date, end_date)

        xml_list = []
        for record in records:
            # This ID is unique per data package, but won't unify multiple
            # packages for the same project
            doc_id = record.xpath("str[@name='id']")[0].text
            record = ElementTree.tostring(record, encoding=self.record_encoding)
            xml_list.append(RawDocument({
                'doc': record,
                'source': self.short_name,
                'docID': copy_to_unicode(doc_id),
                'filetype': 'xml'
            }))

        return xml_list

    def get_records(self, start_date, end_date):
        ''' helper function to get a response from the DataONE
        API, with the specified number of rows.
        Returns an etree element with results '''
        query = 'dateModified:[{}T00:00:00Z TO {}T00:00:00Z]'.format(start_date.isoformat(), end_date.isoformat())
        # first request asks for a single row only to learn the hit count
        doc = requests.get(DATAONE_SOLR_ENDPOINT, params={
            'q': query,
            'start': 0,
            'rows': 1
        })
        doc = etree.XML(doc.content)
        rows = int(doc.xpath("//result/@numFound")[0])

        # then page through the results 1000 at a time, yielding each <doc>
        n = 0
        while n < rows:
            data = requests.get(DATAONE_SOLR_ENDPOINT, params={
                'q': query,
                'start': n,
                'rows': 1000
            })
            docs = etree.XML(data.content).xpath('//doc')
            for doc in docs:
                yield doc
            n += 1000
| {
"repo_name": "fabianvf/scrapi",
"path": "scrapi/harvesters/dataone.py",
"copies": "1",
"size": "7555",
"license": "apache-2.0",
"hash": 4731498114516477000,
"line_mean": 39.4010695187,
"line_max": 184,
"alpha_frac": 0.5759099934,
"autogenerated": false,
"ratio": 4.066200215285253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142110208685253,
"avg_score": null,
"num_lines": null
} |
"""API harvester for DataOne - for the SHARE project
Example query: https://cn.dataone.org/cn/v1/query/solr/?q=dateModified:[NOW-5DAY%20TO%20*]&rows=10
"""
## harvester for DataONE SOLR search API
from __future__ import unicode_literals
import re
import logging
from datetime import timedelta, date
from lxml import etree
from dateutil.parser import *
from xml.etree import ElementTree
from nameparser import HumanName
from scrapi import requests
from scrapi import settings
from scrapi.base import XMLHarvester
from scrapi.util import copy_to_unicode
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import compose, single_result, build_properties
logger = logging.getLogger(__name__)
DEFAULT_ENCODING = 'UTF-8'
DATAONE_SOLR_ENDPOINT = 'https://cn.dataone.org/cn/v1/query/solr/'
def process_doi(service_id, doc_doi):
    """Extract a DOI from the service id or any of the document DOI fields.

    Args:
        service_id (str): The harvested document's service identifier.
        doc_doi: A DOI string or list of DOI strings, possibly carrying a
            'doi' prefix that is stripped before matching.

    Returns:
        str: The first matching DOI, or '' when none is found.
    """
    # NOTE(review): DOI registrant codes may be longer than four digits;
    # this pattern only matches exactly four -- confirm before widening.
    doi_re = r'10\.\d{4}/\w*\.\w*(/\w*)?'
    # Build a concrete list (the original used map(), whose iterator result
    # cannot be concatenated to a list under Python 3).
    if isinstance(doc_doi, list):
        doi_list = [item.replace('doi', '') for item in doc_doi]
    else:
        doi_list = [doc_doi.replace('doi', '')]
    for item in [service_id] + doi_list:
        match = re.search(doi_re, item)
        if match:
            return match.group(0)
    return ''
def process_contributors(author, submitters, contributors,
                         investigators):
    """Build a list of contributor dicts from DataONE author fields.

    The author (if any), contributors, and investigators are merged and
    de-duplicated; the author entry additionally carries the submitter
    email address.

    Args:
        author: The primary author (str or single-element list).
        submitters: Iterable of submitter identifiers; an entry containing
            '@' is treated as the author's email address.
        contributors: Contributor name(s) (str or list).
        investigators: Investigator name(s) (str or list).

    Returns:
        list: dicts with 'name', 'givenName', 'additionalName',
        'familyName', and (for the author entry) 'email'.
    """
    if not author:
        author = ''
    elif isinstance(author, list):
        author = author[0]
    if not isinstance(contributors, list):
        contributors = [contributors]
    if not isinstance(investigators, list):
        investigators = [investigators]

    # NOTE(review): set() makes the resulting contributor order
    # nondeterministic across runs -- confirm whether order matters.
    unique_contributors = list(set([author] + contributors + investigators))

    if len(unique_contributors) < 1:
        return []

    # this is the index of the author in the unique_contributors list
    if author != '':
        author_index = unique_contributors.index(author)
    else:
        author_index = None

    # grabs the email if there is one, this should go with the author index
    # (the last submitter containing '@' wins)
    email = ''
    for submitter in submitters:
        if '@' in submitter:
            email = submitter

    contributor_list = []
    for index, contributor in enumerate(unique_contributors):
        if author_index is not None and index == author_index:
            # if contributor == NAME and email != '':
            #     # TODO - maybe add this back in someday
            # sometimes this yields really weird names like mjg4
            #     # TODO - names not always perfectly lined up with emails...
            #     contributor = name_from_email(email)
            name = HumanName(contributor)
            contributor_dict = {
                'name': contributor,
                'givenName': name.first,
                'additionalName': name.middle,
                'familyName': name.last,
                # NOTE(review): unicode() is Python 2 only -- this module
                # predates Python 3; the email key is set even when empty.
                'email': unicode(email)
            }
            contributor_list.append(contributor_dict)
        else:
            name = HumanName(contributor)
            contributor_list.append({
                'name': contributor,
                'givenName': name.first,
                'additionalName': name.middle,
                'familyName': name.last,
            })
    return contributor_list
class DataOneHarvester(XMLHarvester):
    """Harvester for the DataONE coordinating-node SOLR search API.

    Fetches records modified within a date window and maps the SOLR
    fields onto the scrapi document schema below.
    """

    short_name = 'dataone'
    long_name = 'DataONE: Data Observation Network for Earth'
    url = 'https://www.dataone.org/'

    namespaces = {}
    record_encoding = None

    # Mapping of output document fields to XPath expressions (with optional
    # post-processing callables) evaluated against each SOLR <doc> element.
    schema = {
        'otherProperties': build_properties(
            ('authorGivenName', ("str[@name='authorGivenName']/node()")),
            ('authorSurName', ("str[@name='authorSurName']/node()")),
            ('authoritativeMN', ("str[@name='authoritativeMN']/node()")),
            ('checksum', ("str[@name='checksum']/node()")),
            ('checksumAlgorithm', ("str[@name='checksumAlgorithm']/node()")),
            ('datasource', ("str[@name='datasource']/node()")),
            ('datePublished', ("date[@name='datePublished']/node()")),
            ('dateUploaded', ("date[@name='dateUploaded']/node()")),
            ('pubDate', ("date[@name='pubDate']/node()")),
            ('updateDate', ("date[@name='updateDate']/node()")),
            ('fileID', ("str[@name='fileID']/node()")),
            ('formatId', ("str[@name='formatId']/node()")),
            ('formatType', ("str[@name='formatType']/node()")),
            ('identifier', ("str[@name='identifier']/node()")),
            ('readPermission', "arr[@name='readPermission']/str/node()"),
            ('replicaMN', "arr[@name='replicaMN']/str/node()"),
            ('replicaVerifiedDate', "arr[@name='replicaVerifiedDate']/date/node()"),
            ('replicationAllowed', ("bool[@name='replicationAllowed']/node()")),
            ('numberReplicas', ("int[@name='numberReplicas']/node()")),
            ('preferredReplicationMN', "arr[@name='preferredReplicationMN']/str/node()"),
            ('rightsHolder', ("str[@name='rightsHolder']/node()")),
            ('scientificName', "arr[@name='scientificName']/str/node()"),
            ('site', "arr[@name='site']/str/node()"),
            ('size', ("long[@name='size']/node()")),
            ('isDocumentedBy', "arr[@name='isDocumentedBy']/str/node()"),
            ('serviceID', "str[@name='id']/node()"),
            ('sku', "str[@name='sku']/node()")
        ),
        'freeToRead': {
            # start date is only set when the record is flagged public
            'startDate': ("bool[@name='isPublic']/node()", "date[@name='dateModified']/node()", lambda x, y: parse(y[0]).date().isoformat() if x else None)
        },
        'contributors': ("str[@name='author']/node()", "str[@name='submitter']/node()", "arr[@name='origin']/str/node()", "arr[@name='investigator']/str/node()", process_contributors),
        'uris': {
            # prefer the data URL when it is an http link, else the id
            'canonicalUri': ("str[@name='id']/node()", "//str[@name='dataUrl']/node()", lambda x, y: y[0] if 'http' in single_result(y) else x[0] if 'http' in single_result(x) else ''),
            'objectUri': ("arr[@name='resourceMap']/str/node()", compose(lambda x: x.replace('doi:', 'http://dx.doi.org/'), single_result))
        },
        'tags': ("//arr[@name='keywords']/str/node()", lambda x: x if isinstance(x, list) else [x]),
        'providerUpdatedDateTime': ("str[@name='dateModified']/node()", compose(lambda x: parse(x).date().isoformat(), single_result)),
        'title': ("str[@name='title']/node()", single_result),
        'description': ("str[@name='abstract']/node()", single_result)
    }

    def harvest(self, start_date=None, end_date=None):
        """Return RawDocuments for records modified in the date window.

        Defaults to the last settings.DAYS_BACK days when no dates given.
        """
        start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
        end_date = end_date or date.today()

        records = self.get_records(start_date, end_date)

        xml_list = []
        for record in records:
            # serialize each SOLR <doc> into a RawDocument keyed by its id
            doc_id = record.xpath("str[@name='id']")[0].text
            record = ElementTree.tostring(record, encoding=self.record_encoding)
            xml_list.append(RawDocument({
                'doc': record,
                'source': self.short_name,
                'docID': copy_to_unicode(doc_id),
                'filetype': 'xml'
            }))

        return xml_list

    def get_records(self, start_date, end_date):
        ''' helper function to get a response from the DataONE
        API, with the specified number of rows.
        Returns an etree element with results '''
        query = 'dateModified:[{}T00:00:00Z TO {}T00:00:00Z]'.format(start_date.isoformat(), end_date.isoformat())
        # first request asks for a single row only to learn the hit count
        doc = requests.get(DATAONE_SOLR_ENDPOINT, params={
            'q': query,
            'start': 0,
            'rows': 1
        })
        doc = etree.XML(doc.content)
        rows = int(doc.xpath("//result/@numFound")[0])

        # then page through the results 1000 at a time, yielding each <doc>
        n = 0
        while n < rows:
            data = requests.get(DATAONE_SOLR_ENDPOINT, params={
                'q': query,
                'start': n,
                'rows': 1000
            })
            docs = etree.XML(data.content).xpath('//doc')
            for doc in docs:
                yield doc
            n += 1000
| {
"repo_name": "icereval/scrapi",
"path": "scrapi/harvesters/dataone.py",
"copies": "1",
"size": "7978",
"license": "apache-2.0",
"hash": -5343840664655323000,
"line_mean": 38.3004926108,
"line_max": 185,
"alpha_frac": 0.5688142392,
"autogenerated": false,
"ratio": 3.9455984174085064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5014412656608506,
"avg_score": null,
"num_lines": null
} |
# API host url to make the call against
HOST_URL = ''
PORT = 443

# Represents WS1 _numeric_ organization group-ID
TENANT_GROUP_ID = ''

# The WS1 REST API tenant code
AW_TENANT_CODE = ''

# Username and Password to access WS1 REST APIs
API_USERNAME = ''
API_PASSWORD = ''

# Maximum chunk size for uploads: 1024 * 1024 = 1 MB default. Modify as required.
MAX_UPLOAD_BYTE_LENGTH = 1024 * 1024

# List of assignment groups in WS1 UEM for Alpha deployment
ALPHA_GROUPS = []

# List of assignment groups in WS1 UEM for Beta deployment
BETA_GROUPS = []

# List of assignment groups in WS1 UEM for Production deployment
PRODUCTION_GROUPS = []

# POST_SCRIPT_VALIDATION : 1 => When set to 1, after uploading and publishing the app, the script will fetch
#                               the details of the uploaded app and validate:
#                               Application Name is same as mentioned in script arguments
#                               Application is in Active State
#                               Application is uploaded in the same OG as mentioned in the config file
#                               Application is assigned to the same Smart Groups as mentioned in the config file
#                               Application is published with same push mode as mentioned in script arguments
#                         : 0 => When set to 0. It will just upload and publish the app. Validations will not be done.
POST_SCRIPT_VALIDATION = 1

# ----------------------------------------------------------------------------------------------------------------------
# Build Server Details
# Fill in the following details if this python script needs to be integrated with the build server.
# If the build pipeline used is Jenkins, pass the script name "jenkins_build_information.py 1" in place of build number

# Build server url where the app build is run
BUILD_SERVER_URL = ''

# Build project name
BUILD_PROJECT_NAME = ''

# Username to login to the build server
BUILD_SERVER_USERNAME = ''

# Password to login to the build server
BUILD_SERVER_PASSWORD = ''
| {
"repo_name": "vmwaresamples/AirWatch-samples",
"path": "Mobile apps CICD script/config/config.py",
"copies": "1",
"size": "2025",
"license": "bsd-3-clause",
"hash": 2491613008707397600,
"line_mean": 47.2142857143,
"line_max": 120,
"alpha_frac": 0.6335802469,
"autogenerated": false,
"ratio": 4.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56335802469,
"avg_score": null,
"num_lines": null
} |
""" API implementation for course grade-policy """
from rest_framework import generics
from api import *
from serializers import CourseGradeSerializer
from rest_framework.permissions import IsAuthenticated
from django.http import Http404
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from oauth2_provider.ext.rest_framework.authentication import OAuth2Authentication
# Create your views here.
class CourseGradeList(generics.ListAPIView):
    """
    **Use Case**

        *Get a paginated list of courses with their grading policies in the edX Platform.

        Each page in the list can contain up to 10 courses.

    **Example Requests**

          GET /api/courses/v2/grading_policies

    **Response Values**

        On success with Response Code <200>

        * count: The number of courses in the edX platform.

        * next: The URI to the next page of courses.

        * previous: The URI to the previous page of courses.

        * num_pages: The number of pages listing courses.

        * results:  A list of courses returned. Each collection in the list
          contains these fields.

            * course_id: The unique identifier for the course.

            * course_display_name: The display name of the course.

            * organization: The organization specified for the course.

            * run: The run of the course.

            * course: The course number.

            * course_start: The course start date.

            * course_end: The course end date. If course end date is not specified, the
              value is null.

            * course_registration_start: The course registration start date.

            * course_registration_end: The course registration end date. If course registration end date is not
              specified, the value is null.

            * grader : List of different assignments type and their details like total number, weight, number of droppable

            * grade_cutoffs : List of different grades of the course, with their individual range

    **ERROR RESPONSES**

        * Response Code <403> FORBIDDEN

    """

    # NOTE(review): evaluated once at module import time -- confirm that
    # grading policies created after server start are still picked up.
    queryset = get_all_courses_grading_policy()  # Get grading policy for all courses
    serializer_class = CourseGradeSerializer
    # require an authenticated user via session, basic, or OAuth2 credentials
    permission_classes = (IsAuthenticated,)
    authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)
class CourseGradeDetail(generics.RetrieveAPIView):
    """
    **Use Case**
        Get grade policy for a specific course.
    **Example Requests**
        GET /api/courses/v2/grading_policies{course_organization}+{course_name}+{course_run}
    **Response Values**
        On success with Response Code <200>
        * course_id: The unique identifier for the course.
        * course_display_name: The display name of the course.
        * organization: The organization specified for the course.
        * run: The run of the course.
        * name: The course name
        * course_start: The course start date.
        * course_end: The course end date. If course end date is not specified, the
          value is null.
        * course_registration_start: The course registration start date.
        * course_registration_end: The course registration end date. If course registration end date is not
          specified, the value is null.
        * grader : List of different assignments type and their details like total number, weight, number of droppable
        * grade_cutoffs : List of different grades of the course, with their individual range
    **ERROR RESPONSES**
        * Response Code <404> COURSE NOT FOUND
        * Response Code <403> FORBIDDEN
    """
    serializer_class = CourseGradeSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (SessionAuthentication, BasicAuthentication, OAuth2Authentication)

    def get_object(self):
        """Return the grading policy for the course identified by the URL kwargs.

        :raises Http404: when the course lookup fails or the returned policy
            has no ``course_id`` (treated as "course not found").
        """
        try:
            # Renamed from `list`, which shadowed the builtin.
            policy = get_grading_policy(self.kwargs['name'], self.kwargs['run'], self.kwargs['org'])
            policy['course_id']  # sanity check: a valid policy carries its course id
            return policy
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; any lookup failure still maps to 404.
            raise Http404
"repo_name": "jaygoswami2303/course_dashboard_api",
"path": "v2/GradePolicyAPI/views.py",
"copies": "1",
"size": "4691",
"license": "mit",
"hash": 3809723215747540500,
"line_mean": 35.0923076923,
"line_max": 126,
"alpha_frac": 0.5924109998,
"autogenerated": false,
"ratio": 5.282657657657658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01572232113865372,
"num_lines": 130
} |
# API information
# http://myanimelist.net/modules.php?go=api
from logging import debug, info, warning, error
import re
from .. import AbstractInfoHandler
from data.models import UnprocessedShow, ShowType
class InfoHandler(AbstractInfoHandler):
    """MyAnimeList implementation of the show-info service interface.

    Uses the XML search API (authenticated) for searches and scrapes the
    HTML show pages for everything else.
    """

    _show_link_base = "http://myanimelist.net/anime/{id}/"
    # Raw string: avoids invalid-escape warnings for \. in the pattern.
    _show_link_matcher = r"https?://(?:.+?\.)?myanimelist\.net/anime/([0-9]{5,})/"
    _season_show_url = "http://myanimelist.net/anime/season"
    _api_search_base = "http://myanimelist.net/api/anime/search.xml?q={q}"

    def __init__(self):
        super().__init__("mal", "MyAnimeList")

    def get_link(self, link):
        """Return the MAL URL for a stored link object, or None."""
        if link is None:
            return None
        return self._show_link_base.format(id=link.site_key)

    def extract_show_id(self, url):
        """Pull the numeric MAL show id out of a show URL, or None."""
        if url is not None:
            match = re.match(self._show_link_matcher, url, re.I)
            if match:
                return match.group(1)
        return None

    def find_show(self, show_name, **kwargs):
        """Search the MAL API for shows matching show_name.

        :return: a (possibly empty) list of UnprocessedShow.
        """
        url = self._api_search_base.format(q=show_name)
        result = self._mal_api_request(url, **kwargs)
        if result is None:
            error("Failed to find show")
            return list()

        assert result.tag == "anime"
        shows = list()
        for child in result:
            # FIX: removed stray debug print(child).
            assert child.tag == "entry"
            # FIX: renamed local from `id`, which shadowed the builtin.
            show_id = child.find("id").text
            name = child.find("title").text
            more_names = [child.find("english").text]

            show = UnprocessedShow(self.key, show_id, name, more_names, ShowType.UNKNOWN, 0, False)
            shows.append(show)

        return shows

    def find_show_info(self, show_id, **kwargs):
        """Scrape the MAL show page for alternative (English) titles.

        :return: an UnprocessedShow carrying the names, or None on failure.
        """
        debug("Getting show info for {}".format(show_id))

        # Request show page from MAL
        url = self._show_link_base.format(id=show_id)
        response = self._mal_request(url, **kwargs)
        if response is None:
            error("Cannot get show page")
            return None

        # Parse show page
        names_sib = response.find("h2", string="Alternative Titles")

        # English
        name_elem = names_sib.find_next_sibling("div")
        if name_elem is None:
            warning(" Name elem not found")
            return None
        name_english = name_elem.string
        info(" English: {}".format(name_english))

        names = [name_english]
        # BUGFIX: previously passed the builtin function `id` instead of the
        # show_id parameter, storing a bogus key on the returned show.
        return UnprocessedShow(self.key, show_id, None, names, ShowType.UNKNOWN, 0, False)

    def get_episode_count(self, link, **kwargs):
        """Scrape the episode count from the show page; None if unavailable."""
        debug("Getting episode count")

        # Request show page from MAL
        url = self._show_link_base.format(id=link.site_key)
        response = self._mal_request(url, **kwargs)
        if response is None:
            error("Cannot get show page")
            return None

        # Parse show page (ugh, HTML parsing)
        count_sib = response.find("span", string="Episodes:")
        if count_sib is None:
            error("Failed to find episode count sibling")
            return None
        count_elem = count_sib.find_next_sibling(string=re.compile(r"\d+"))
        if count_elem is None:
            warning(" Count not found")
            return None

        count = int(count_elem.strip())
        debug(" Count: {}".format(count))
        return count

    def get_show_score(self, show, link, **kwargs):
        """Scrape the community rating from the show page; None if unavailable."""
        debug("Getting show score")

        # Request show page
        url = self._show_link_base.format(id=link.site_key)
        response = self._mal_request(url, **kwargs)
        if response is None:
            error("Cannot get show page")
            return None

        # Find score
        score_elem = response.find("span", attrs={"itemprop": "ratingValue"})
        if score_elem is None:
            warning(" Count not found")
            return None

        score = float(score_elem.string)
        debug(" Score: {}".format(score))
        return score

    def get_seasonal_shows(self, year=None, season=None, **kwargs):
        """Scrape the current season page for shows.

        year/season are accepted but not yet used (TODO below).
        :return: a (possibly empty) list of UnprocessedShow.
        """
        # TODO: use year and season if provided
        debug("Getting season shows: year={}, season={}".format(year, season))

        # Request season page from MAL
        response = self._mal_request(self._season_show_url, **kwargs)
        if response is None:
            error("Cannot get show list")
            return list()

        # Parse page (ugh, HTML parsing. Where's the useful API, MAL?)
        lists = response.find_all(class_="seasonal-anime-list")
        if len(lists) == 0:
            error("Invalid page? Lists not found")
            return list()
        new_list = lists[0].find_all(class_="seasonal-anime")
        if len(new_list) == 0:
            error("Invalid page? Shows not found in list")
            return list()

        new_shows = list()
        episode_count_regex = re.compile(r"(\d+|\?) eps?")
        for show in new_list:
            show_key = show.find(class_="genres")["id"]
            title = str(show.find("a", class_="link-title").string)
            title = _normalize_title(title)
            # An "X 2nd Season" show should also be findable by its base title.
            more_names = [title[:-11]] if title.lower().endswith("2nd season") else list()
            show_type = ShowType.TV  # TODO, changes based on section/list
            episode_count = episode_count_regex.search(show.find(class_="eps").find(string=episode_count_regex)).group(1)
            episode_count = None if episode_count == "?" else int(episode_count)
            has_source = show.find(class_="source").string != "Original"

            new_shows.append(UnprocessedShow(self.key, show_key, title, more_names, show_type, episode_count, has_source))

        return new_shows

    # Private

    def _mal_request(self, url, **kwargs):
        """GET an HTML page via the shared request helper."""
        return self.request(url, html=True, **kwargs)

    def _mal_api_request(self, url, **kwargs):
        """GET an XML API resource using configured basic-auth credentials."""
        if "username" not in self.config or "password" not in self.config:
            error("Username and password required for MAL requests")
            return None
        auth = (self.config["username"], self.config["password"])
        return self.request(url, auth=auth, xml=True, **kwargs)
def _convert_type(mal_type):
return None
def _normalize_title(title):
title = re.sub(" \(TV\)", "", title)
return title
| {
"repo_name": "TheEnigmaBlade/holo",
"path": "src/services/info/myanimelist.py",
"copies": "2",
"size": "5405",
"license": "mit",
"hash": 6806540905714169000,
"line_mean": 30.0632183908,
"line_max": 113,
"alpha_frac": 0.670120259,
"autogenerated": false,
"ratio": 3.0640589569161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8862151421700489,
"avg_score": 0.17440555884312214,
"num_lines": 174
} |
# API information
# http://wiki.anidb.net/w/HTTP_API_Definition
# Limits
# - 1 page every 2 seconds
# - Avoid calling same function multiple times per day
#
# Season page
# http://anidb.net/perl-bin/animedb.pl?tvseries=1&show=calendar
# - Based on year and month, defaults to current month
from logging import debug, info, warning, error
import re
from .. import AbstractInfoHandler
from data.models import UnprocessedShow, ShowType
class InfoHandler(AbstractInfoHandler):
    """AniDB implementation of the show-info service interface.

    Most lookups are currently stubbed out; only link formatting and id
    extraction are active. See the rate limits noted at the top of this file.
    """

    _show_link_base = "http://anidb.net/perl-bin/animedb.pl?show=anime&aid={id}"
    # Matches both the short form (/a123) and the long form (animedb.pl?...aid=123).
    _show_link_matcher = "https?://anidb\\.net/a([0-9]+)|https?://anidb\\.net/perl-bin/animedb\\.pl\\?(?:[^/]+&)aid=([0-9]+)"
    _season_url = "http://anidb.net/perl-bin/animedb.pl?show=calendar&tvseries=1&ova=1&last.anime.month=1&last.anime.year=2016"
    _api_base = "http://api.anidb.net:9001/httpapi?client={client}&clientver={ver}&protover=1&request={request}"

    def __init__(self):
        super().__init__("anidb", "AniDB")
        # AniDB allows at most 1 page every 2 seconds (see header comment).
        self.rate_limit_wait = 2

    def get_link(self, link):
        # Return the site URL for a stored link object, or None.
        if link is None:
            return None
        return self._show_link_base.format(id=link.site_key)

    def extract_show_id(self, url):
        # Return the AniDB id from either URL form; group 1 is the short form,
        # group 2 the long form.
        if url is not None:
            match = re.match(self._show_link_matcher, url, re.I)
            if match:
                return match.group(1) or match.group(2)
        return None

    def get_episode_count(self, link, **kwargs):
        # Not supported by this handler.
        return None

    def get_show_score(self, show, link, **kwargs):
        # Not supported by this handler.
        return None

    def get_seasonal_shows(self, year=None, season=None, **kwargs):
        # NOTE(review): deliberately short-circuited — everything below the
        # early return is dead code, apparently kept for a future re-enable.
        # Confirm the scraper below still matches AniDB's markup before use.
        return []
        #TODO: use year and season if provided
        debug("Getting season shows: year={}, season={}".format(year, season))

        # Request season page from AniDB
        response = self._site_request(self._season_url, **kwargs)
        if response is None:
            error("Cannot get show list")
            return list()

        # Parse page
        shows_list = response.select(".calendar_all .g_section.middle .content .box")
        new_shows = list()
        for show in shows_list:
            top = show.find(class_="top")
            title_e = top.find("a")
            title = str(title_e.string)
            title = _normalize_title(title)
            show_link = title_e["href"]
            key = re.search("aid=([0-9]+)", show_link).group(1)

            data = show.find(class_="data")
            more_names = list()
            show_info_str = data.find(class_="series").string.strip()
            debug("Show info: {}".format(show_info_str))
            show_info = show_info_str.split(", ")
            show_type = _convert_show_type(show_info[0])
            if len(show_info) == 1:
                episode_count = 1
            else:
                ec_match = re.match("([0-9]+) eps", show_info[1])
                episode_count = int(ec_match.group(1)) if ec_match else None
            tags = data.find(class_="tags")
            has_source = tags.find("a", string=re.compile("manga|novel|visual novel")) is not None

            new_shows.append(UnprocessedShow(self.key, key, title, more_names, show_type, episode_count, has_source))

        return new_shows

    def find_show(self, show_name, **kwargs):
        # Not supported by this handler.
        return list()

    def find_show_info(self, show_id, **kwargs):
        # Not supported by this handler.
        return None

    def _site_request(self, url, **kwargs):
        # GET an HTML page via the shared request helper.
        return self.request(url, html=True, **kwargs)
def _convert_show_type(type_str):
    """Map an AniDB type string to a ShowType, defaulting to UNKNOWN."""
    known_types = {
        "tv series": ShowType.TV,
        "movie": ShowType.MOVIE,
        "ova": ShowType.OVA,
    }
    return known_types.get(type_str.lower(), ShowType.UNKNOWN)
def _normalize_title(title):
year_match = re.match("(.*) \([0-9]+\)", title)
if year_match:
title = year_match.group(1)
title = re.sub(": second season", " 2nd Season", title, flags=re.I)
title = re.sub(": third season", " 3rd Season", title, flags=re.I)
title = re.sub(": fourth season", " 4th Season", title, flags=re.I)
title = re.sub(": fifth season", " 5th Season", title, flags=re.I)
title = re.sub(": sixth season", " 6th Season", title, flags=re.I)
return title
| {
"repo_name": "andrewhillcode/holo",
"path": "src/services/info/anidb.py",
"copies": "2",
"size": "3784",
"license": "mit",
"hash": 7516533145852217000,
"line_mean": 31.9043478261,
"line_max": 124,
"alpha_frac": 0.6649048626,
"autogenerated": false,
"ratio": 2.7905604719764012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44554653345764006,
"avg_score": null,
"num_lines": null
} |
# api/__init__.py
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from config import config
# Shared SQLAlchemy handle; bound to a concrete app later via db.init_app(app).
db = SQLAlchemy()
def add_cors_headers(response):
    """Attach permissive CORS headers to an outgoing response.

    For a CORS preflight (OPTIONS) request, also advertise the allowed
    methods and echo back whatever headers the client asked about.
    """
    response.headers.add('Access-Control-Allow-Origin', '*')
    if request.method == 'OPTIONS':
        response.headers['Access-Control-Allow-Methods'] = 'DELETE, GET, POST, PUT'
        requested = request.headers.get('Access-Control-Request-Headers')
        if requested:
            response.headers['Access-Control-Allow-Headers'] = requested
    return response
def create_app(config_name):
    """Application factory function for app instance creation.

    :param config_name: key into the ``config`` mapping that selects the
        configuration class to load.
    :return: a configured Flask app with auth and v1 API blueprints mounted.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    db.init_app(app)

    # Blueprints are imported here, not at module level, to avoid circular
    # imports with this package's __init__.
    from api.resources.api_auth_v1 import auth_bp
    from api.resources import api_v1
    app.register_blueprint(auth_bp, url_prefix='/auth')
    app.register_blueprint(api_v1, url_prefix='/api/v1')
    return app
| {
"repo_name": "Mbarak-Mbigo/cp2_bucketlist",
"path": "api/__init__.py",
"copies": "1",
"size": "1046",
"license": "mit",
"hash": -4063555159342365000,
"line_mean": 29.7647058824,
"line_max": 83,
"alpha_frac": 0.6711281071,
"autogenerated": false,
"ratio": 3.762589928057554,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9834335787329417,
"avg_score": 0.01987644956562747,
"num_lines": 34
} |
""" API integration for osu!
Adds Mods enums with raw value calculations and some
request functions.
"""
from collections import namedtuple
from enum import Enum
import re
from pcbot import utils
api_url = "https://osu.ppy.sh/api/"
api_key = ""  # set via set_api_key() before calling the official API
requests_sent = 0  # running count of API requests made this session

ripple_url = "https://ripple.moe/api/"
# Usernames prefixed with "ripple:" are routed to the Ripple private server.
ripple_pattern = re.compile(r"ripple:\s*(?P<data>.+)")

# Accepted name prefixes for each GameMode member (see GameMode.get_mode).
mode_names = {
    "Standard": ["standard", "osu"],
    "Taiko": ["taiko"],
    "Catch": ["catch", "ctb", "fruits"],
    "Mania": ["mania", "keys"]
}
def set_api_key(s: str):
    """ Store the osu! API key module-wide so request functions can omit
    the "k" parameter.
    """
    global api_key
    api_key = s
class GameMode(Enum):
    """ Enum for gamemodes. """
    Standard = 0
    Taiko = 1
    Catch = 2
    Mania = 3

    @classmethod
    def get_mode(cls, mode: str):
        """ Return the member one of whose accepted names starts with the
        given string (case-insensitive), or None. """
        lowered = mode.lower()
        for member_name, prefixes in mode_names.items():
            if any(prefix.startswith(lowered) for prefix in prefixes):
                return cls.__members__[member_name]
        return None
class Mods(Enum):
    """ Enum for displaying mods.

    Each member's value is 2**n where n is the declared bit position, so a
    member corresponds directly to its bit in the osu! API's enabled_mods.
    """
    NF = 0
    EZ = 1
    TD = 2
    HD = 3
    HR = 4
    SD = 5
    DT = 6
    RX = 7
    HT = 8
    NC = 9
    FL = 10
    AU = 11
    SO = 12
    AP = 13
    PF = 14
    Key4 = 15
    Key5 = 16
    Key6 = 17
    Key7 = 18
    Key8 = 19
    FI = 20
    RD = 21
    Cinema = 22
    Key9 = 24
    KeyCoop = 25
    Key1 = 26
    Key3 = 27
    Key2 = 28
    ScoreV2 = 29
    LastMod = 30
    KeyMod = Key4 | Key5 | Key6 | Key7 | Key8
    FreeModAllowed = NF | EZ | HD | HR | SD | FL | FI | RX | AP | SO | KeyMod  # ¯\_(ツ)_/¯
    ScoreIncreaseMods = HD | HR | DT | FL | FI

    def __new__(cls, num):
        """ Convert the declared bit position into the member value 2**num. """
        member = object.__new__(cls)
        member._value_ = 1 << num
        return member

    @classmethod
    def list_mods(cls, bitwise: int):
        """ Return a list of mod enums from the given bitwise (enabled_mods in the osu! API) """
        mods = [cls(1 << bit) for bit in range(bitwise.bit_length())
                if bitwise & (1 << bit)]

        # NC implies DT in the API bitmask; show only NC.
        if Mods.DT in mods and Mods.NC in mods:
            mods.remove(Mods.DT)
        return mods

    @classmethod
    def format_mods(cls, mods):
        """ Return a string with the mods in a sorted format, such as DTHD.

        mods is either a bitwise or a list of mod enums.
        """
        mod_list = cls.list_mods(mods) if type(mods) is int else mods
        assert type(mod_list) is list
        if not mod_list:
            return "Nomod"
        return "".join(mod.name for mod in mod_list)
def def_section(api_name: str, first_element: bool=False):
    """ Add a section using a template to simplify adding API functions.

    :param api_name: osu! API endpoint name, appended to the base url.
    :param first_element: when True the generated function returns only the
        first element of the JSON response (or None when it is empty).
    :return: an async function that performs the request with **params.
    """
    async def template(url=api_url, request_tries: int=1, **params):
        global requests_sent

        # Convert ripple id properly and change the url
        if "u" in params:
            ripple = ripple_pattern.match(params["u"])
            if ripple:
                params["u"] = ripple.group("data")
                url = ripple_url

        # Add the API key unless we're not sending to the official API
        if url == api_url and "k" not in params:
            params["k"] = api_key

        # Download using a URL of the given API function name
        for i in range(request_tries):
            json = await utils.download_json(url + api_name, **params)
            requests_sent += 1

            if json is not None:
                break
        else:
            # Every attempt returned None.
            return None

        # Unless we want to extract the first element, return the entire object (usually a list)
        if not first_element:
            return json

        # If the returned value should be the first element, see if we can cut it
        return json[0] if len(json) > 0 else None

    # Set the correct name of the function and add simple docstring
    template.__name__ = api_name
    template.__doc__ = "Get " + ("list" if not first_element else "dict") + " using " + api_url + api_name
    return template
# Define all osu! API requests using the template
get_beatmaps = def_section("get_beatmaps")
get_user = def_section("get_user", first_element=True)
get_scores = def_section("get_scores")
get_user_best = def_section("get_user_best")
get_user_recent = def_section("get_user_recent")
get_match = def_section("get_match", first_element=True)
get_replay = def_section("get_replay")

# v1 URLs: /b/<beatmap id> or /s/<beatmapset id>, with an optional ?m=<mode>.
beatmap_url_pattern_v1 = re.compile(r"https?://osu\.ppy\.sh/(?P<type>[bs])/(?P<id>\d+)(?:\?m=(?P<mode>\d))?")
# v2 URLs: /beatmapsets/<set id>, optionally followed by #<mode>/<beatmap id>.
beatmap_url_pattern_v2 = re.compile(r"https?://osu\.ppy\.sh/beatmapsets/(?P<beatmapset_id>\d+)(?:#(?P<mode>\w+)/(?P<beatmap_id>\d+))?")

BeatmapURLInfo = namedtuple("BeatmapURLInfo", "beatmapset_id beatmap_id gamemode")
def parse_beatmap_url(url: str):
    """ Parse a beatmap url into a BeatmapURLInfo.
    For V1, only one parameter of either beatmap_id or beatmapset_id will be set.
    For V2, only beatmapset_id will be set, or all arguments are set.
    :raise SyntaxError: The URL is neither a v1 or v2 osu! url.
    """
    v1 = beatmap_url_pattern_v1.match(url)
    if v1:
        # A v1 url may carry the gamemode as a query parameter.
        mode = GameMode(int(v1.group("mode"))) if v1.group("mode") is not None else None
        if v1.group("type") == "b":
            return BeatmapURLInfo(beatmapset_id=None, beatmap_id=v1.group("id"), gamemode=mode)
        return BeatmapURLInfo(beatmapset_id=v1.group("id"), beatmap_id=None, gamemode=mode)

    v2 = beatmap_url_pattern_v2.match(url)
    if v2:
        if v2.group("mode") is None:
            return BeatmapURLInfo(beatmapset_id=v2.group("beatmapset_id"), beatmap_id=None, gamemode=None)
        return BeatmapURLInfo(beatmapset_id=v2.group("beatmapset_id"),
                              beatmap_id=v2.group("beatmap_id"),
                              gamemode=GameMode.get_mode(v2.group("mode")))

    raise SyntaxError("The given URL is invalid.")
async def beatmap_from_url(url: str, mode: GameMode=GameMode.Standard, *, return_type: str="beatmap"):
    """ Takes a url and returns the beatmap in the specified gamemode.
    If a url for a submission is given, it will find the most difficult map.
    :param url: The osu! beatmap url to lookup.
    :param mode: The GameMode to lookup.
    :param return_type: Defaults to "beatmap". Use "id" to only return the id (spares a request for /b/ urls).
    :raise SyntaxError: The URL is neither a v1 or v2 osu! url.
    :raise LookupError: The beatmap linked in the URL was not found.
    """
    beatmap_info = parse_beatmap_url(url)

    # Get the beatmap specified
    if beatmap_info.beatmap_id is not None:
        # The id is already in the url; no API request needed.
        if return_type == "id":
            return beatmap_info.beatmap_id

        # Only download the beatmap of the id, so that only this beatmap will be returned
        difficulties = await get_beatmaps(b=beatmap_info.beatmap_id, m=mode.value, limit=1)
    else:
        difficulties = await get_beatmaps(s=beatmap_info.beatmapset_id, m=mode.value)

    # If the beatmap doesn't exist, the operation was unsuccessful
    if not difficulties:
        raise LookupError("The beatmap with the given URL was not found.")

    # Find the most difficult beatmap (highest star rating) in the set
    beatmap = None
    highest = -1
    for diff in difficulties:
        stars = float(diff["difficultyrating"])
        if stars > highest:
            beatmap, highest = diff, stars

    if return_type == "id":
        return beatmap["beatmap_id"]
    return beatmap
async def beatmapset_from_url(url: str):
    """ Takes a url and returns the beatmapset of the specified beatmap.
    :param url: The osu! beatmap url to lookup.
    :raise SyntaxError: The URL is neither a v1 or v2 osu! url.
    :raise LookupError: The beatmap linked in the URL was not found.
    """
    beatmap_info = parse_beatmap_url(url)

    # Use the beatmapset_id from the url if it has one, else find the beatmapset
    if beatmap_info.beatmapset_id is not None:
        beatmapset_id = beatmap_info.beatmapset_id
    else:
        # Resolve the set id with a single-beatmap request first.
        difficulty = await get_beatmaps(b=beatmap_info.beatmap_id, limit=1)

        # If the beatmap doesn't exist, the operation was unsuccessful
        if not difficulty:
            raise LookupError("The beatmap with the given URL was not found.")
        beatmapset_id = difficulty[0]["beatmapset_id"]

    beatmapset = await get_beatmaps(s=beatmapset_id)

    # Also make sure we get the beatmap
    if not beatmapset:
        raise LookupError("The beatmapset with the given URL was not found.")

    return beatmapset
def lookup_beatmap(beatmaps: list, **lookup):
    """ Finds and returns the first beatmap with the lookup specified.

    Beatmaps is a list of beatmap dicts and could be used with get_beatmaps().
    Lookup is any key stored in a beatmap from get_beatmaps(); key names are
    case-insensitive and values are compared case-insensitively.

    :raise KeyError: A lookup key is not present in the beatmaps.
    :return: the first matching beatmap dict, or None.
    """
    if not beatmaps:
        return None

    for beatmap in beatmaps:
        match = True
        for key, value in lookup.items():
            # BUGFIX: previously the membership test used key.lower() but the
            # subsequent index used the original-case key, so a mixed-case
            # kwarg (e.g. Title="x") raised KeyError instead of matching.
            field = key.lower()
            if field not in beatmap:
                raise KeyError("The list of beatmaps does not have key: {}".format(key))

            if not beatmap[field].lower() == value.lower():
                match = False

        if match:
            return beatmap
    return None
def rank_from_events(events: dict, beatmap_id: str):
    """ Return the rank of the first score of given beatmap_id from a
    list of events gathered via get_user(), or None when absent.
    """
    rank_pattern = re.compile(r"rank\s#(?P<rank>\d+)(?:<|\s)")
    for event in events:
        if event["beatmap_id"] != beatmap_id:
            continue
        found = rank_pattern.search(event["display_html"])
        if found:
            return int(found.group("rank"))
    return None
| {
"repo_name": "PcBoy111/PCBOT",
"path": "plugins/osulib/api.py",
"copies": "1",
"size": "10082",
"license": "mit",
"hash": 3258670899188709400,
"line_mean": 31.0955414013,
"line_max": 135,
"alpha_frac": 0.6085532844,
"autogenerated": false,
"ratio": 3.3909825033647376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499535787764738,
"avg_score": null,
"num_lines": null
} |
""" API integration for osu!
Adds Mods enums with raw value calculations and some
request functions.
"""
import logging
import re
from collections import namedtuple
from enum import Enum
from pcbot import utils
api_url = "https://osu.ppy.sh/api/"
api_key = ""  # set via set_api_key() before calling the official API
requests_sent = 0  # running count of API requests made this session

ripple_url = "https://ripple.moe/api/"
# Usernames prefixed with "ripple:" are routed to the Ripple private server.
ripple_pattern = re.compile(r"ripple:\s*(?P<data>.+)")

# Accepted name prefixes for each GameMode member (see GameMode.get_mode).
mode_names = {
    "Standard": ["standard", "osu"],
    "Taiko": ["taiko"],
    "Catch": ["catch", "ctb", "fruits"],
    "Mania": ["mania", "keys"]
}
def set_api_key(s: str):
    """ Store the osu! API key module-wide so request functions can omit
    the "k" parameter.
    """
    global api_key
    api_key = s
class GameMode(Enum):
    """ Enum for gamemodes. """
    Standard = 0
    Taiko = 1
    Catch = 2
    Mania = 3

    @classmethod
    def get_mode(cls, mode: str):
        """ Return the member one of whose accepted names starts with the
        given string (case-insensitive), or None. """
        lowered = mode.lower()
        for member_name, prefixes in mode_names.items():
            if any(prefix.startswith(lowered) for prefix in prefixes):
                return cls.__members__[member_name]
        return None
class Mods(Enum):
    """ Enum for displaying mods.

    Each member's value is 2**n where n is the declared bit position, so a
    member corresponds directly to its bit in the osu! API's enabled_mods.
    """
    NF = 0
    EZ = 1
    TD = 2
    HD = 3
    HR = 4
    SD = 5
    DT = 6
    RX = 7
    HT = 8
    NC = 9
    FL = 10
    AU = 11
    SO = 12
    AP = 13
    PF = 14
    Key4 = 15
    Key5 = 16
    Key6 = 17
    Key7 = 18
    Key8 = 19
    FI = 20
    RD = 21
    Cinema = 22
    Key9 = 24
    KeyCoop = 25
    Key1 = 26
    Key3 = 27
    Key2 = 28
    ScoreV2 = 29
    LastMod = 30
    KeyMod = Key4 | Key5 | Key6 | Key7 | Key8
    FreeModAllowed = NF | EZ | HD | HR | SD | FL | FI | RX | AP | SO | KeyMod  # ¯\_(ツ)_/¯
    ScoreIncreaseMods = HD | HR | DT | FL | FI

    def __new__(cls, num):
        """ Convert the declared bit position into the member value 2**num. """
        member = object.__new__(cls)
        member._value_ = 1 << num
        return member

    @classmethod
    def list_mods(cls, bitwise: int):
        """ Return a list of mod enums from the given bitwise (enabled_mods in the osu! API) """
        mods = [cls(1 << bit) for bit in range(bitwise.bit_length())
                if bitwise & (1 << bit)]

        # NC implies DT in the API bitmask; show only NC.
        if Mods.DT in mods and Mods.NC in mods:
            mods.remove(Mods.DT)
        return mods

    @classmethod
    def format_mods(cls, mods):
        """ Return a string with the mods in a sorted format, such as DTHD.

        mods is either a bitwise or a list of mod enums.
        """
        mod_list = cls.list_mods(mods) if type(mods) is int else mods
        assert type(mod_list) is list
        if not mod_list:
            return "Nomod"
        return "".join(mod.name for mod in mod_list)
def def_section(api_name: str, first_element: bool=False):
    """ Add a section using a template to simplify adding API functions.

    :param api_name: osu! API endpoint name, appended to the base url.
    :param first_element: when True the generated function returns only the
        first element of the JSON response (or None when it is empty).
    :return: an async function that performs the request with **params.
    """
    async def template(url=api_url, request_tries: int=1, **params):
        global requests_sent

        # Convert ripple id properly and change the url
        if "u" in params:
            ripple = ripple_pattern.match(params["u"])
            if ripple:
                params["u"] = ripple.group("data")
                url = ripple_url

        # Add the API key unless we're not sending to the official API
        if url == api_url and "k" not in params:
            params["k"] = api_key

        # Download using a URL of the given API function name
        for i in range(request_tries):
            try:
                json = await utils.download_json(url + api_name, **params)
            except ValueError as e:
                # Malformed response body; log and retry on the next attempt.
                logging.warning("ValueError Calling {}: {}".format(url + api_name, e))
            else:
                requests_sent += 1

                if json is not None:
                    break
        else:
            # Every attempt failed or returned None.
            return None

        # Unless we want to extract the first element, return the entire object (usually a list)
        if not first_element:
            return json

        # If the returned value should be the first element, see if we can cut it
        return json[0] if len(json) > 0 else None

    # Set the correct name of the function and add simple docstring
    template.__name__ = api_name
    template.__doc__ = "Get " + ("list" if not first_element else "dict") + " using " + api_url + api_name
    return template
# Define all osu! API requests using the template
get_beatmaps = def_section("get_beatmaps")
get_user = def_section("get_user", first_element=True)
get_scores = def_section("get_scores")
get_user_best = def_section("get_user_best")
get_user_recent = def_section("get_user_recent")
get_match = def_section("get_match", first_element=True)
get_replay = def_section("get_replay")

# v1 URLs: /b/<beatmap id> or /s/<beatmapset id>, optional ?m=<mode>; this
# variant also accepts the legacy old.ppy.sh host.
beatmap_url_pattern_v1 = re.compile(r"https?://(osu|old)\.ppy\.sh/(?P<type>[bs])/(?P<id>\d+)(?:\?m=(?P<mode>\d))?")
# v2 URLs: /beatmapsets/<set id>, optionally followed by #<mode>/<beatmap id>.
beatmap_url_pattern_v2 = re.compile(r"https?://osu\.ppy\.sh/beatmapsets/(?P<beatmapset_id>\d+)(?:#(?P<mode>\w+)/(?P<beatmap_id>\d+))?")

BeatmapURLInfo = namedtuple("BeatmapURLInfo", "beatmapset_id beatmap_id gamemode")
def parse_beatmap_url(url: str):
    """ Parse a beatmap url into a BeatmapURLInfo.
    For V1, only one parameter of either beatmap_id or beatmapset_id will be set.
    For V2, only beatmapset_id will be set, or all arguments are set.
    :raise SyntaxError: The URL is neither a v1 or v2 osu! url.
    """
    v1 = beatmap_url_pattern_v1.match(url)
    if v1:
        # A v1 url may carry the gamemode as a query parameter.
        mode = GameMode(int(v1.group("mode"))) if v1.group("mode") is not None else None
        if v1.group("type") == "b":
            return BeatmapURLInfo(beatmapset_id=None, beatmap_id=v1.group("id"), gamemode=mode)
        return BeatmapURLInfo(beatmapset_id=v1.group("id"), beatmap_id=None, gamemode=mode)

    v2 = beatmap_url_pattern_v2.match(url)
    if v2:
        if v2.group("mode") is None:
            return BeatmapURLInfo(beatmapset_id=v2.group("beatmapset_id"), beatmap_id=None, gamemode=None)
        return BeatmapURLInfo(beatmapset_id=v2.group("beatmapset_id"),
                              beatmap_id=v2.group("beatmap_id"),
                              gamemode=GameMode.get_mode(v2.group("mode")))

    raise SyntaxError("The given URL is invalid.")
async def beatmap_from_url(url: str, mode: GameMode=GameMode.Standard, *, return_type: str="beatmap"):
    """ Takes a url and returns the beatmap in the specified gamemode.
    If a url for a submission is given, it will find the most difficult map.
    :param url: The osu! beatmap url to lookup.
    :param mode: The GameMode to lookup.
    :param return_type: Defaults to "beatmap". Use "id" to only return the id (spares a request for /b/ urls).
    :raise SyntaxError: The URL is neither a v1 or v2 osu! url.
    :raise LookupError: The beatmap linked in the URL was not found.
    """
    beatmap_info = parse_beatmap_url(url)

    # Get the beatmap specified
    if beatmap_info.beatmap_id is not None:
        # The id is already in the url; no API request needed.
        if return_type == "id":
            return beatmap_info.beatmap_id

        # Only download the beatmap of the id, so that only this beatmap will be returned
        difficulties = await get_beatmaps(b=beatmap_info.beatmap_id, m=mode.value, limit=1)
    else:
        difficulties = await get_beatmaps(s=beatmap_info.beatmapset_id, m=mode.value)

    # If the beatmap doesn't exist, the operation was unsuccessful
    if not difficulties:
        raise LookupError("The beatmap with the given URL was not found.")

    # Find the most difficult beatmap (highest star rating) in the set
    beatmap = None
    highest = -1
    for diff in difficulties:
        stars = float(diff["difficultyrating"])
        if stars > highest:
            beatmap, highest = diff, stars

    if return_type == "id":
        return beatmap["beatmap_id"]
    return beatmap
async def beatmapset_from_url(url: str):
    """ Takes a url and returns the beatmapset of the specified beatmap.
    :param url: The osu! beatmap url to lookup.
    :raise SyntaxError: The URL is neither a v1 or v2 osu! url.
    :raise LookupError: The beatmap linked in the URL was not found.
    """
    beatmap_info = parse_beatmap_url(url)

    # Use the beatmapset_id from the url if it has one, else find the beatmapset
    if beatmap_info.beatmapset_id is not None:
        beatmapset_id = beatmap_info.beatmapset_id
    else:
        # Resolve the set id with a single-beatmap request first.
        difficulty = await get_beatmaps(b=beatmap_info.beatmap_id, limit=1)

        # If the beatmap doesn't exist, the operation was unsuccessful
        if not difficulty:
            raise LookupError("The beatmap with the given URL was not found.")
        beatmapset_id = difficulty[0]["beatmapset_id"]

    beatmapset = await get_beatmaps(s=beatmapset_id)

    # Also make sure we get the beatmap
    if not beatmapset:
        raise LookupError("The beatmapset with the given URL was not found.")

    return beatmapset
def lookup_beatmap(beatmaps: list, **lookup):
    """ Finds and returns the first beatmap with the lookup specified.

    Beatmaps is a list of beatmap dicts and could be used with get_beatmaps().
    Lookup is any key stored in a beatmap from get_beatmaps(); key names are
    case-insensitive and values are compared case-insensitively.

    :raise KeyError: A lookup key is not present in the beatmaps.
    :return: the first matching beatmap dict, or None.
    """
    if not beatmaps:
        return None

    for beatmap in beatmaps:
        match = True
        for key, value in lookup.items():
            # BUGFIX: previously the membership test used key.lower() but the
            # subsequent index used the original-case key, so a mixed-case
            # kwarg (e.g. Title="x") raised KeyError instead of matching.
            field = key.lower()
            if field not in beatmap:
                raise KeyError("The list of beatmaps does not have key: {}".format(key))

            if not beatmap[field].lower() == value.lower():
                match = False

        if match:
            return beatmap
    return None
def rank_from_events(events: dict, beatmap_id: str):
    """ Return the rank of the first score of given beatmap_id from a
    list of events gathered via get_user(), or None when absent.
    """
    rank_pattern = re.compile(r"rank\s#(?P<rank>\d+)(?:<|\s)")
    for event in events:
        if event["beatmap_id"] != beatmap_id:
            continue
        found = rank_pattern.search(event["display_html"])
        if found:
            return int(found.group("rank"))
    return None
| {
"repo_name": "PcBoy111/PC-BOT-V2",
"path": "plugins/osulib/api.py",
"copies": "2",
"size": "10277",
"license": "mit",
"hash": 6150155475992589000,
"line_mean": 31.2037617555,
"line_max": 135,
"alpha_frac": 0.6057626789,
"autogenerated": false,
"ratio": 3.4140910601528747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5019853739052875,
"avg_score": null,
"num_lines": null
} |
"""API interactions with community.lsst.org API."""
import os
import csv
import json
import requests
BASE_URL = 'https://community.lsst.org'
# Discourse API credentials are read from the environment at import time.
KEY = os.getenv('DISCOURSE_KEY')
USER = os.getenv('DISCOURSE_USER')
if KEY is None or USER is None:
    # Warn early: every API call in this module fails without credentials.
    print('Please set $DISCOURSE_KEY and $DISCOURSE_USER environment '
          'variables.')
def get(path, params=None):
    """Generic GET against the Discourse API.

    API credentials are merged into the query parameters; the caller's
    params take precedence.
    """
    if not path.startswith('/'):
        path = '/' + path
    query = {'api_key': KEY, 'api_user': USER}
    if params is not None:
        query.update(params)
    response = requests.get(BASE_URL + path, params=query)
    print(response.status_code)
    return response
def put(path, data=None, params=None):
    """Generic PUT against the Discourse API.

    API credentials are merged into the query parameters; the caller's
    params take precedence. The body (data) is passed through untouched.
    """
    if not path.startswith('/'):
        path = '/' + path
    query = {'api_key': KEY, 'api_user': USER}
    if params is not None:
        query.update(params)
    response = requests.put(BASE_URL + path, data=data, params=query)
    print(response.status_code)
    return response
def post(path, data=None):
    """Generic POST against the Discourse API.

    API credentials are merged into the form data (as the Discourse API
    expects for POST requests); the caller's data takes precedence.
    """
    if not path.startswith('/'):
        path = '/' + path
    _data = {'api_key': KEY, 'api_user': USER}
    if data is not None:
        _data.update(data)
    # SECURITY FIX: the old print(_data) wrote the API key to stdout/logs.
    # Log the payload with the credential redacted instead.
    print({k: v for k, v in _data.items() if k != 'api_key'})
    session = requests.session()
    r = session.post(BASE_URL + path, data=_data)
    print(r.status_code)
    return r
def all_users(export_csv_path):
    """Iterate over all API users.

    Usernames and emails come from a CSV export file (the members API
    endpoint is not used); each username is then resolved through the API.

    Yields
    ------
    user : DiscourseUser
        A discourse user
    """
    for exported in ExportList(export_csv_path).users:
        yield DiscourseUser.from_username(exported.username,
                                          email=exported.email)
def resolve_group_id(group_name):
    """Get the ID of a group, given its name.

    :param group_name: exact group name as known to Discourse.
    :return: the integer group ID.
    :raises RuntimeError: if no group with that name exists.
    """
    r = get('/admin/groups.json')
    for g in r.json():
        if g['name'] == group_name:
            # Debug print of g.keys() removed; it leaked noise on every call.
            return g['id']
    # Fixed message typo ("no found" -> "not found").
    raise RuntimeError('Group {0} not found'.format(group_name))
def get_category_ids():
    """Return a dict mapping category names to category IDs."""
    data = get('/categories.json').json()
    return {category['name']: category['id']
            for category in data['category_list']['categories']}
class DiscourseUser(object):
    """A user in Discourse.

    Wraps the JSON payload returned by ``/users/<username>.json``. The
    email is not exposed by that endpoint, so it can be supplied
    separately and is used as a fallback.
    """

    def __init__(self, json_data=None, email=None):
        super().__init__()
        self.data = json_data
        self._email = email

    def __str__(self):
        # Fixed: the replacement fields were missing their braces, so this
        # used to print the literal text "DiscourseUser(self.username, ...)".
        return 'DiscourseUser({self.username}, {self.email})'.format(self=self)

    @classmethod
    def from_username(cls, username, email=None):
        """Get a user from their username. Set email since it is not available
        via the API.
        """
        r = get('/users/{0}.json'.format(username))
        assert r.status_code == 200
        return cls(json_data=r.json(), email=email)

    @property
    def email(self):
        # Prefer the API-provided email; fall back to the one given at
        # construction time (the API usually omits it).
        try:
            return self.data['user']['email']
        except KeyError:
            return self._email

    @property
    def username(self):
        return self.data['user']['username']

    @property
    def groups(self):
        return self.data['user']['groups']

    def add_to_group(self, group_name):
        """Add this user to the named group; returns the PUT response."""
        group_id = resolve_group_id(group_name)
        # Dropped the unused group_name format kwarg; only group_id appears
        # in the template.
        path = '/admin/groups/{group_id}/members.json'.format(
            group_id=group_id)
        payload = {'usernames': self.username}
        r = put(path, data=payload)
        return r

    def private_message(self, subject, content):
        """Send a user a private messsage."""
        path = '/posts'
        payload = {
            'title': subject,
            'raw': content,
            'archetype': 'private_message',
            'target_usernames': self.username
        }
        r = post(path, data=payload)
        return r
class ExportUser(object):
    """A User in the Discourse CSV export file.

    Built from one CSV row: positional string cells are converted
    field-by-field (ints via int(), booleans via json.loads on
    "true"/"false", everything else kept as text).
    """

    # (attribute name, converter) per CSV column, in column order.
    # ``None`` means the raw string is stored unchanged.
    _COLUMNS = (
        ('user_id', int), ('name', None), ('username', None),
        ('email', None), ('title', None), ('created_at', None),
        ('last_seen_at', None), ('last_posted_at', None),
        ('last_emailed_at', None), ('trust_level', int),
        ('approved', json.loads), ('suspended_at', None),
        ('suspended_till', None), ('blocked', json.loads),
        ('active', json.loads), ('admin', json.loads),
        ('moderator', json.loads), ('ip_address', None),
        ('topics_entered', None), ('posts_read_count', int),
        ('time_read', int), ('topic_count', int), ('post_count', int),
        ('likes_given', int), ('likes_received', int),
        ('institution', None),
    )

    def __init__(self, *args):
        super().__init__()
        for (attr, convert), cell in zip(self._COLUMNS, args):
            setattr(self, attr, cell if convert is None else convert(cell))
        # Semicolon-delimited list of group names in the final column.
        self.group_names = [s for s in args[26].split(';')]

    def __str__(self):
        return 'ExportUser({0}, {1}, {2})'.format(
            self.user_id, self.username, self.email)
class ExportList(object):
    """User list from a CSV export

    https://community.lsst.org/admin/users/list/active
    """

    def __init__(self, csv_path):
        super().__init__()
        with open(csv_path, encoding='utf-8') as stream:
            rows = csv.reader(stream)
            next(rows)  # skip header
            # Single-cell rows are separators/noise in the export; only the
            # first 27 columns carry user fields.
            self.users = [
                ExportUser(*[cell.strip() for cell in row[:27]])
                for row in rows
                if len(row) != 1
            ]

    def find_by_email(self, email):
        """Return the first user with this email, or None."""
        return next((u for u in self.users if u.email == email), None)

    def find_by_username(self, username):
        """Return the first user with this username, or None."""
        return next((u for u in self.users if u.username == username), None)
| {
"repo_name": "lsst-sqre/community_api_notebooks",
"path": "community/api.py",
"copies": "1",
"size": "6518",
"license": "mit",
"hash": -811355343041001100,
"line_mean": 27.8407079646,
"line_max": 79,
"alpha_frac": 0.5599877263,
"autogenerated": false,
"ratio": 3.623123957754308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4683111684054308,
"avg_score": null,
"num_lines": null
} |
"""API interface."""
from http import HTTPStatus
import logging
import requests
from flask_forecaster.tracker.models import ProjectSnapshot
logger = logging.getLogger(__name__)
class Tracker(object):
    """Represents the API and exposes appropriate methods."""

    BASE_URL = 'https://www.pivotaltracker.com/services/v5/'
    """Base URL for the Tracker API."""

    def __init__(self, token):
        self.token = token
        self.headers = self._create_headers(token)

    def get_project(self, project_id):
        """Get the data for a specified project.

        Arguments:
          project_id (:py:class:`int`): The ID of the project.

        Returns:
          :py:class:`dict`: The JSON project data, or ``None`` when the
            request failed or the API reported an error.
        """
        response = requests.get(
            self.BASE_URL + 'projects/{}'.format(project_id),
            headers=self.headers,
        )
        result = self._handle_response(response)
        if result is not None and 'error' not in result:
            return result

    def get_project_history(self, project_id, convert=False):
        """Get the history for a specified project.

        Arguments:
          project_id (:py:class:`int` or :py:class:`str`): The ID of
            the project.
          convert (:py:class:`bool`, optional): Whether to convert the
            JSON data into model objects (defaults to ``False``).

        Returns:
          :py:class:`list`: The history data, or ``None`` on failure.
        """
        response = requests.get(
            self.BASE_URL + 'projects/{}/history/snapshots'.format(project_id),
            headers=self.headers,
        )
        result = self._handle_response(response)
        if result is not None and 'error' not in result:
            if not convert:
                return result
            return [ProjectSnapshot.from_response(data) for data in result]

    @staticmethod
    def _handle_response(response):
        """Handle the standard response pattern.

        Returns the parsed JSON body on HTTP 200 (even when it contains an
        ``error`` key, which callers filter for); ``None`` otherwise.
        """
        if response.status_code == HTTPStatus.OK:
            result = response.json()
            if 'error' in result:
                logger.warning('API call failed with error %s', result['error'])
            return result
        else:
            # Fixed: was calling the root ``logging.warning`` instead of the
            # module-level ``logger``, losing the logger name/config.
            logger.warning('request failed with code %s', response.status_code)

    @classmethod
    def validate_token(cls, token):
        """Validate the supplied token.

        Arguments:
          token (:py:class:`str`): The token to validate.

        Returns:
          :py:class:`list`: The user's projects, or `None` if the token
            is invalid.
        """
        response = requests.get(
            cls.BASE_URL + 'me',
            # Fixed: use cls rather than hard-coding Tracker, so subclasses
            # overriding _create_headers behave correctly.
            headers=cls._create_headers(token),
        )
        result = cls._handle_response(response)
        if result is not None:
            return result.get('projects')

    @classmethod
    def _create_headers(cls, token):
        """Create the default headers."""
        return {'X-TrackerToken': token}

    @classmethod
    def from_untrusted_token(cls, token):
        """Generate a new instance from a potentially-invalid token.

        Arguments:
          token (:py:class:`str`): The token to validate.

        Returns:
          :py:class:`Tracker`: The API instance.

        Raises:
          :py:class:`ValueError`: If the token isn't valid.
        """
        if cls.validate_token(token) is None:
            raise ValueError('invalid token {}'.format(token))
        return cls(token)
| {
"repo_name": "textbook/flask-forecaster",
"path": "flask_forecaster/tracker/api.py",
"copies": "1",
"size": "3461",
"license": "isc",
"hash": -6131895690925394000,
"line_mean": 28.5811965812,
"line_max": 80,
"alpha_frac": 0.5827795435,
"autogenerated": false,
"ratio": 4.420178799489144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5502958342989144,
"avg_score": null,
"num_lines": null
} |
# api_internal_cache/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.db import models
from django.utils.timezone import now
from django.db.models import Q
from datetime import timedelta
import json
from wevote_functions.functions import positive_value_exists
class ApiInternalCacheManager(models.Manager):
    """Manager for pre-generated API responses and their refresh scheduling.

    Convention: every public method returns a ``results`` dict with at least
    ``success`` (bool) and ``status`` (accumulated diagnostic text) rather
    than raising; exceptions are caught and folded into ``status``.
    """
    def __unicode__(self):
        return "ApiInternalCacheManager"
    def create_api_internal_cache(
            self,
            api_name='',
            cached_api_response_serialized='',
            election_id_list_serialized='',
            date_cached=None):
        """Store one serialized API response as an ApiInternalCache row.

        :param api_name: required; which API this payload belongs to
        :param cached_api_response_serialized: the JSON response, serialized
        :param election_id_list_serialized: serialized election scope (part
            of the cache key together with api_name)
        :param date_cached: defaults to now() when not supplied
        """
        status = ''
        api_internal_cache = None
        api_internal_cache_id = 0
        api_internal_cache_saved = False
        if not positive_value_exists(api_name):
            # api_name is the primary cache key component; refuse to save
            # an unidentifiable entry.
            status += "API_INTERNAL_CACHE_MISSING_API_NAME "
            results = {
                'success': False,
                'status': status,
                'api_internal_cache_saved': api_internal_cache_saved,
                'api_internal_cache': api_internal_cache,
                'api_internal_cache_id': 0,
            }
            return results
        if date_cached is None:
            date_cached = now()
        try:
            api_internal_cache = ApiInternalCache.objects.create(
                api_name=api_name,
                cached_api_response_serialized=cached_api_response_serialized,
                date_cached=date_cached,
                election_id_list_serialized=election_id_list_serialized,
            )
            api_internal_cache_saved = True
            api_internal_cache_id = api_internal_cache.id
            success = True
            status += "API_INTERNAL_CACHE_CREATED "
        except Exception as e:
            api_internal_cache_saved = False
            success = False
            status += "API_INTERNAL_CACHE_NOT_CREATED " + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'api_internal_cache_saved': api_internal_cache_saved,
            'api_internal_cache': api_internal_cache,
            'api_internal_cache_id': api_internal_cache_id,
        }
        return results
    def create_api_refresh_request(
            self,
            api_name='',
            election_id_list_serialized='',
            date_refresh_is_needed=None):
        """Queue an ApiRefreshRequest asking the batch system to regenerate
        a cache entry.

        :param date_refresh_is_needed: earliest time the refresh should run;
            defaults to now() ("refresh immediately")
        """
        status = ''
        api_refresh_request = None
        api_refresh_request_saved = False
        if not positive_value_exists(api_name):
            status += "API_REFRESH_REQUEST_MISSING_API_NAME "
            results = {
                'success': False,
                'status': status,
                'api_refresh_request_saved': api_refresh_request_saved,
                'api_refresh_request': api_refresh_request,
            }
            return results
        if date_refresh_is_needed is None:
            date_refresh_is_needed = now()
        try:
            api_refresh_request = ApiRefreshRequest.objects.create(
                api_name=api_name,
                election_id_list_serialized=election_id_list_serialized,
                date_refresh_is_needed=date_refresh_is_needed,
                date_scheduled=now(),
            )
            api_refresh_request_saved = True
            success = True
            status += "API_REFRESH_REQUEST_CREATED "
        except Exception as e:
            api_refresh_request_saved = False
            success = False
            status += "API_REFRESH_REQUEST_NOT_CREATED " + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'api_refresh_request_saved': api_refresh_request_saved,
            'api_refresh_request': api_refresh_request,
        }
        return results
    def does_api_refresh_request_exist_in_future(
            self,
            api_name='',
            election_id_list_serialized=''):
        """Return whether an uncompleted refresh request is already scheduled
        for a future time for this (api_name, election list) pair.
        """
        api_refresh_request_found = False
        status = ''
        success = True
        try:
            query = ApiRefreshRequest.objects.filter(
                api_name__iexact=api_name,
                date_refresh_is_needed__gt=now(),
                election_id_list_serialized__iexact=election_id_list_serialized,
                refresh_completed=False)
            number_found = query.count()
            if positive_value_exists(number_found):
                api_refresh_request_found = True
        except ApiRefreshRequest.DoesNotExist:
            # Not an error: simply no future request scheduled.
            status += "API_REFRESH_REQUEST_IN_FUTURE_NOT_FOUND "
        except Exception as e:
            success = False
            status += 'API_REFRESH_REQUEST_IN_FUTURE_ERROR ' + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'api_refresh_request_found': api_refresh_request_found,
        }
        return results
    def mark_prior_api_internal_cache_entries_as_replaced(
            self,
            api_name="",
            election_id_list_serialized="",
            excluded_api_internal_cache_id=0):
        """Mark all older cache rows for this key as replaced, except the row
        identified by excluded_api_internal_cache_id (the new entry).
        """
        status = ''
        success = True
        if positive_value_exists(excluded_api_internal_cache_id):
            try:
                query = ApiInternalCache.objects.filter(
                    api_name__iexact=api_name,
                    election_id_list_serialized__iexact=election_id_list_serialized,
                    replaced=False)
                query = query.exclude(id=excluded_api_internal_cache_id)
                number_updated = query.update(
                    replaced=True,
                    date_replaced=now(),
                )
                status += "API_INTERNAL_CACHE_MARK_REPLACED_COUNT: " + str(number_updated) + " "
            except Exception as e:
                success = False
                status += 'API_INTERNAL_CACHE_MARK_REPLACED_ERROR ' + str(e) + ' '
        else:
            # Without an excluded id we would mark every row replaced,
            # including the newest one; refuse instead.
            status += 'MUST_SPECIFY_REPLACEMENT_CACHE '
            success = False
        results = {
            'success': success,
            'status': status,
        }
        return results
    def mark_refresh_completed_for_prior_api_refresh_requested(
            self,
            api_name='',
            election_id_list_serialized=''):
        """Mark every due (date_refresh_is_needed <= now) uncompleted refresh
        request for this key as completed.
        """
        status = ''
        try:
            number_updated = ApiRefreshRequest.objects.filter(
                api_name__iexact=api_name,
                election_id_list_serialized__iexact=election_id_list_serialized,
                date_refresh_is_needed__lte=now(),
                refresh_completed=False)\
                .update(
                    date_refresh_completed=now(),
                    refresh_completed=True)
            success = True
            status += "API_REFRESH_REQUESTED_MARK_REFRESH_COUNT: " + str(number_updated) + " "
        except Exception as e:
            success = False
            status += 'MARK_REFRESH_COMPLETED_ERROR ' + str(e) + ' '
        results = {
            'success': success,
            'status': status,
        }
        return results
    def retrieve_next_api_refresh_request(self):
        """Retrieve the next due refresh request for the batch system to work
        on, skipping requests checked out within the last 15 minutes.
        """
        api_refresh_request = None
        api_refresh_request_found = False
        api_refresh_request_list = []
        status = ''
        try:
            # Pick up Refresh requests that were started over 15 minutes ago, and not marked as refresh_completed since
            fifteen_minutes_ago = now() - timedelta(minutes=15)
            query = ApiRefreshRequest.objects.filter(
                date_refresh_is_needed__lte=now(),
                refresh_completed=False)
            query = query.filter(Q(date_checked_out__isnull=True) | Q(date_checked_out__lte=fifteen_minutes_ago))
            query = query.order_by('-date_refresh_is_needed')
            api_refresh_request_list = list(query)
            if len(api_refresh_request_list):
                # First entry of the ordered list is the one to process next.
                api_refresh_request = api_refresh_request_list[0]
                api_refresh_request_found = True
            success = True
        except ApiRefreshRequest.DoesNotExist:
            success = True
            status += "RETRIEVE_NEXT_REFRESH_REQUEST_NOT_FOUND "
        except Exception as e:
            success = False
            status += 'RETRIEVE_NEXT_REFRESH_REQUEST_ERROR ' + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'api_refresh_request': api_refresh_request,
            'api_refresh_request_found': api_refresh_request_found,
            'api_refresh_request_list': api_refresh_request_list,
        }
        return results
    def retrieve_latest_api_internal_cache(
            self,
            api_name='',
            election_id_list_serialized=''):
        """Return the newest non-replaced, non-empty cache row for this key,
        along with its deserialized JSON payload.
        """
        api_internal_cache = None
        api_internal_cache_found = False
        api_internal_cache_list = []
        cached_api_response_json_data = {}
        status = ''
        if not positive_value_exists(api_name):
            status += "RETRIEVE_LATEST_CACHE-MISSING_API_NAME "
            results = {
                'success': False,
                'status': status,
                'api_internal_cache': None,
                'api_internal_cache_found': False,
                'api_internal_cache_list': [],
                'cached_api_response_json_data': {},
            }
            return results
        try:
            query = ApiInternalCache.objects.filter(
                api_name__iexact=api_name,
                election_id_list_serialized__iexact=election_id_list_serialized,
                replaced=False)
            query = query.exclude(cached_api_response_serialized='')
            query = query.order_by('-date_cached')
            api_internal_cache_list = list(query)
            if len(api_internal_cache_list):
                # Newest first after ordering by -date_cached.
                api_internal_cache = api_internal_cache_list[0]
                api_internal_cache_found = True
                if positive_value_exists(api_internal_cache.cached_api_response_serialized):
                    cached_api_response_json_data = api_internal_cache.cached_api_response_json_data()
            success = True
        except ApiInternalCache.DoesNotExist:
            success = True
            status += "RETRIEVE_LATEST_CACHE_NOT_FOUND "
        except Exception as e:
            success = False
            status += 'RETRIEVE_LATEST_CACHE_ERROR ' + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'api_internal_cache': api_internal_cache,
            'api_internal_cache_found': api_internal_cache_found,
            'api_internal_cache_list': api_internal_cache_list,
            'cached_api_response_json_data': cached_api_response_json_data,
        }
        return results
    def schedule_refresh_of_api_internal_cache(
            self,
            api_name='',
            election_id_list_serialized='',
            api_internal_cache=None):
        """Ensure refresh requests exist so the cache for this key stays warm.

        Creates an immediate refresh when no cache entry exists (or the
        existing one is older than an hour), and makes sure another refresh
        is scheduled ~55 minutes out.
        """
        api_internal_cache_found = False
        status = ''
        success = True
        if api_internal_cache and hasattr(api_internal_cache, 'api_name'):
            # Work with this existing object
            api_internal_cache_found = True
            status += "API_INTERNAL_CACHE_PASSED_IN "
        else:
            status += "API_INTERNAL_CACHE_NOT_PASSED_IN "
            results = self.retrieve_latest_api_internal_cache(
                api_name=api_name,
                election_id_list_serialized=election_id_list_serialized)
            if results['api_internal_cache_found']:
                api_internal_cache_found = True
                api_internal_cache = results['api_internal_cache']
                status += "API_INTERNAL_CACHE_RETRIEVED "
        # Was there an existing api_internal_cache retrieved in the last 60 minutes?
        # If not, schedule refresh immediately.
        create_entry_immediately = False
        if not api_internal_cache_found:
            create_entry_immediately = True
        elif api_internal_cache and hasattr(api_internal_cache, 'date_cached'):
            sixty_minutes_ago = now() - timedelta(hours=1)
            if api_internal_cache.date_cached < sixty_minutes_ago:
                create_entry_immediately = True
        if create_entry_immediately:
            # We don't pass in date_refresh_is_needed, so it assumes value is "immediately"
            results = self.create_api_refresh_request(
                api_name=api_name,
                election_id_list_serialized=election_id_list_serialized)
            status += results['status']
        # Do we have an ApiRefreshRequest entry scheduled in the future? If not, schedule one 55 minutes from now.
        results = self.does_api_refresh_request_exist_in_future(
            api_name=api_name,
            election_id_list_serialized=election_id_list_serialized)
        if not results['success']:
            status += "NOT_ABLE_TO_SEE-(does_api_refresh_request_exist_in_future): " + str(results['status']) + " "
        elif results['api_refresh_request_found']:
            status += "API_REFRESH_REQUEST_FOUND "
        else:
            date_refresh_is_needed = now() + timedelta(minutes=55)  # Schedule 55 minutes from now
            results = self.create_api_refresh_request(
                api_name=api_name,
                election_id_list_serialized=election_id_list_serialized,
                date_refresh_is_needed=date_refresh_is_needed)
            status += results['status']
        results = {
            'success': success,
            'status': status,
        }
        return results
class ApiInternalCache(models.Model):
    """
    We pre-generate responses for API calls that take too long for a voter to wait.
    """
    # Which API this cached payload belongs to (cache key, with the election list)
    api_name = models.CharField(max_length=255, null=False, blank=True, default='')
    # Serialized election id list that scopes this cache entry
    election_id_list_serialized = models.TextField(null=False, default='')
    # The full json response, serialized
    cached_api_response_serialized = models.TextField(null=False, default='')
    # Set automatically when the row is created
    date_cached = models.DateTimeField(null=True, auto_now_add=True)
    # If there is a newer version of this data, set "replaced" to True
    replaced = models.BooleanField(default=False)
    date_replaced = models.DateTimeField(null=True)
    def cached_api_response_json_data(self):
        """Deserialize the stored response; return {} when nothing is cached."""
        if positive_value_exists(self.cached_api_response_serialized):
            return json.loads(self.cached_api_response_serialized)
        else:
            return {}
class ApiRefreshRequest(models.Model):
    """
    Our internal caching logic has determined that the import_batch_system should kick off a refresh of this data
    """
    # Which API to regenerate (matches ApiInternalCache.api_name)
    api_name = models.CharField(max_length=255, null=True, blank=True, unique=False)
    # Serialized election id list scoping the refresh
    election_id_list_serialized = models.TextField(null=True, blank=True)
    # When was this scheduled for processing?
    date_scheduled = models.DateTimeField(null=True, auto_now_add=True)
    # When should the refresh take place? (i.e., any time after this date)
    date_refresh_is_needed = models.DateTimeField(null=True)
    # When did the scheduled process start?
    date_checked_out = models.DateTimeField(null=True)
    # When did the refresh finish?
    date_refresh_completed = models.DateTimeField(null=True)
    # A boolean to make it easy to figure out which refreshes have finished, and which ones haven't
    refresh_completed = models.BooleanField(default=False)
| {
"repo_name": "wevote/WeVoteServer",
"path": "api_internal_cache/models.py",
"copies": "1",
"size": "15958",
"license": "mit",
"hash": -6409071588054139000,
"line_mean": 40.4493506494,
"line_max": 119,
"alpha_frac": 0.5511968918,
"autogenerated": false,
"ratio": 4.324661246612466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005806519264149785,
"num_lines": 385
} |
#API KEY
#AIzaSyBTK9GUrd7sMxjt6EUlHyN9TXPkqb6R0VA
########################
## IMPORT ##
########################
from flask import Flask, jsonify, render_template, request, url_for
from flask_jsglue import JSGlue
import time, os, requests, sqlite3, sys, re
from math import radians, cos, sin, asin, sqrt
from metar import Metar
import logging
########################
## CONFIG ##
########################
app = Flask(__name__)
JSGlue(app)
# ensure responses aren't cached while developing
if app.config["DEBUG"]:
    @app.after_request
    def after_request(response):
        response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
        response.headers["Expires"] = 0
        response.headers["Pragma"] = "no-cache"
        return response
#Set up logging for Flask server (quiet werkzeug's per-request lines)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
#Setup VatSee internal logging
logging.basicConfig(filename='vatsee.log', level=logging.DEBUG, format='%(levelname)s :: %(asctime)s :: %(message)s')
########################
##      GLOBALS       ##
########################
#Memoization for METAR and airline data (unbounded in-process caches)
metars = {}
airlines = {}
#Waypoint Data will be stored in memory which significantly speeds up access
waypoints = {}
#List of countries (as Country objects) for purposes of testing whether a certian geographical point (that plane is at)
#is within that country
#nations = []
#Logging Messages written (used by add_to_log to dedupe repeat messages)
log_msgs = []
#TO--DO LOGGING: log application has started
logging.info('Application started')
#Populate Waypoints into memory database at import time
conn = sqlite3.connect('static_data.db')
c = conn.cursor()
result = c.execute("""SELECT * FROM "waypoints";""").fetchall()
#Store waypoints, for repeated waypoints :::: {'LINNG' : [ (-23, 43), (123, 56), (-43, 56)] etc.
for line in result:
    #Create list if not present in dictionary already
    if not(line[1] in waypoints):
        waypoints[line[1]] = []
    #Append to dictionary; DB stores lat/lon scaled by 1e6 as integers
    waypoints[line[1]].append((float(line[2])/1000000.0, float(line[3])/1000000.0))
##############################################
###Standard function definitions start here###
##############################################
def add_to_log(message, value):
    ''' Recieves a message in text, and a value in LOGGING.x format, and writes
    to the log unless the identical message was already written this run.

    :param message: the text to log (also used as the dedupe key)
    :param value: a logging level constant (logging.INFO, logging.ERROR, ...)
    '''
    if message not in log_msgs:
        # logging.log dispatches on the numeric level directly; the previous
        # if/elif chain silently dropped levels it didn't list (e.g. CRITICAL).
        logging.log(value, message)
        # NOTE(review): log_msgs grows without bound over the process lifetime.
        log_msgs.append(message)
def callsign_to_icao(callsign):
    ''' Gets a IATA callsign like SEA and return ICAO callsign like KSEA.

    Falls back to returning the raw prefix when it is already 4 letters
    (assumed ICAO) or when the IATA code is not in the database.
    '''
    if not callsign:
        return None
    prefix = callsign.split("_")[0]
    if len(prefix) == 4:
        # 4-letter prefixes are treated as already-ICAO but logged because
        # they are unusual. (This log call used to sit *after* the return
        # statement and was unreachable.)
        add_to_log('Unknown IATA from callsign_to_icao (4 letters long) - "%s"' % callsign, logging.ERROR)
        return prefix
    conn = sqlite3.connect('static_data.db')
    try:
        c = conn.cursor()
        result = c.execute("SELECT * FROM 'airports' WHERE iata=? LIMIT 1", (prefix,)).fetchone()
    finally:
        # Previously the connection was never closed.
        conn.close()
    if result is None:
        # No results were found; log (was unreachable before) and fall back.
        add_to_log('Unknown IATA from callsign_to_icao. Not found in database - "%s"' % callsign, logging.ERROR)
        return prefix
    return result[5]
def callsign_to_ATC(callsign):
    '''Gets callsign like 'ATL_GND' and returns a type code:
    0 ATIS 1 CLNC 2 GND 3 TWR 4 APP/DEP 5 CTR 6 UNKNOWN '''
    codes = {"ATIS": 0, "DEL": 1, "GND": 2, "TWR": 3, "APP": 4, "DEP": 4, "CTR": 5, "OBS": 6}
    # The facility type is the last underscore-separated token.
    suffix = callsign.split("_")[-1]
    try:
        return codes[suffix]
    except KeyError:
        # Unknown type; report it and hand back the "unknown" code.
        add_to_log('Unknown ATC type from callsign_to_ATC - "%s"' % callsign, logging.ERROR)
        return 6
def callsign_to_loc(callsign):
    '''Function receives a callsign like "ATL_W_GND" or "KATL_CTR" and returns
    a geographical location for the airport as (lat, lon, alt, name), or None.
    '''
    #Database connection
    conn = sqlite3.connect('static_data.db')
    c = conn.cursor()
    if callsign is None or callsign.lower() == "none":
        return None
    #Determine the code from the string
    apt_code = tuple([callsign.split("_")[0]])
    if len(apt_code[0]) == 3:
        #IATA code provided
        c.execute("SELECT * FROM 'airports' WHERE iata=?", apt_code)
    elif len(apt_code[0]) == 4:
        #ICAO code provided
        c.execute("SELECT * FROM 'airports' WHERE icao=?", apt_code)
    else:
        #Log this unknown airport; it also messes with the map, so it is important to add these airports later
        logging.error('Unknown prefix - cannot find geographical location for callsign from callsign_to_loc - "%s"' % callsign)
        return None
    result = c.fetchone()
    if result is None:
        # Explicit None check replaces a bare "except:" that silently
        # swallowed every error, not just the missing-row TypeError.
        add_to_log('Could not find callsign in database even though it looks proper ICAO/IATA in callsign_to_loc - "%s"' % callsign, logging.WARNING)
        return None
    return tuple(list(result[6:9]) + [result[1]])
def flightlevel_to_feet(flightlevel):
    '''Function recieves something like 'FL360' and returns 36000.

    Also accepts 'F330', plain feet ('33000'), and feet with a 'ft'
    suffix ('33000ft'). Unparsable input returns 0.
    '''
    if not flightlevel:
        return 0
    fl = str(flightlevel).lower().strip()
    try:
        if fl.startswith("fl"):
            return int(fl[2:]) * 100
        if fl.endswith("ft"):
            # Must be checked before the bare "f" prefix test: the old
            # 'if "f" in ...' logic routed '33000ft' into the FL branch
            # and crashed on int('33000t').
            return int(fl[:-2])
        if fl.startswith("f"):
            return int(fl[1:]) * 100
        return int(fl)
    except ValueError:
        # Unknown altitude format filed by the pilot.
        add_to_log('Could not convert filed altitude to numeric in flightlevel_to_feet - "%s"' % fl, logging.INFO)
        return 0
def decode_airline(callsign):
    '''Gets a name like 'BAW156' or 'BA156' and returns a tuple such as ('British Airways', 'UK', 'Speedbird', 'BAW', '156')'''
    # When the airline cannot be decoded, the callsign itself fills every slot.
    fallback = (callsign, callsign, callsign, callsign, callsign)
    # VFR tail numbers (e.g. N123AB) carry no airline component at all.
    if re.findall(r"^[A-Z][A-Z0-9]{3,5}$", callsign.replace('-', '')):
        return fallback
    # Split the leading letters (airline code) from the flight number.
    prefix_match = re.findall(r"^[A-Z]*", callsign)
    if not prefix_match:
        add_to_log('Airline callsign provided to decode_airline not parsable - "%s"' % callsign, logging.WARNING)
        return fallback
    prefix = prefix_match[0]
    flight_number = callsign[len(prefix):]
    if not flight_number:
        add_to_log('Airline flight number could not be parsed by decode_airline - "%s"' % callsign, logging.WARNING)
        return fallback
    # Memoized lookup first, database second.
    if prefix in airlines:
        row = airlines[prefix]
    else:
        conn = sqlite3.connect('static_data.db')
        c = conn.cursor()
        result = c.execute("SELECT * FROM 'airlinecodes' WHERE iata=? OR icao=?", (prefix, prefix)).fetchone()
        if not result:
            add_to_log('Airline callsign not found in database - "%s"' % callsign, logging.WARNING)
            return fallback
        # Cache (name, callsign-word, country) for next time.
        airlines[prefix] = (result[3], result[5], result[4])
        row = airlines[prefix]
    return (row[0], row[1], row[2], prefix, flight_number)
def haversine(lon1, lat1, lon2, lat2):
    """
    Great-circle distance in kilometres between two (lon, lat) points.

    Haversine formula; adapted from
    https://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points
    """
    # work in radians throughout
    lon1, lat1, lon2, lat2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    # haversine of the central angle
    chord = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    angle = 2 * asin(sqrt(chord))
    # 6367 km: mean earth radius used by the original source
    return 6367 * angle
def get_center_coords(name):
    ''' Returns center coordinates as list of tuples given center name, or returns None (for logging) '''
    if not name:
        return None
    # Name might be "LAX" or "LAX_EAST"; try both the full name and its prefix.
    conn = sqlite3.connect('static_data.db')
    cursor = conn.cursor()
    row = cursor.execute("SELECT * FROM 'centers' WHERE name=? OR name=?", (name, name.split('_')[0])).fetchone()
    if not row:
        return None
    # Boundary is stored as newline-separated "x,y" pairs.
    coords = []
    for entry in row[2].split("\n"):
        if entry:
            parts = entry.split(',')
            coords.append((parts[0], parts[1]))
    return coords
def get_METAR(given_code):
    '''Returns METAR data from aviation weather website from US Government; code is a METAR weather station code.
    Uses 30 minute caching to prevent overaccessing of the Aviation Weather database.

    Returns a dict of decoded fields (wind, visibility, temp, pressure, ...)
    plus "cached" ("True"/"False"), or None when the station code is invalid,
    the service returned nothing, or the METAR could not be parsed.
    '''
    #Convert to upper case
    given_code = given_code.upper()
    # Station codes are always exactly 4 characters (ICAO).
    if not(given_code) or len(given_code) != 4:
        return None
    #Check the cache
    if metars.get(given_code):
        #if younger than 1800 seconds, return from cache
        if abs(metars[given_code][0] - time.time()) < 1800:
            #Return the saved information, indicating that it is indeed from the cache (more for debugging purposes)
            metars[given_code][1]["cached"] = "True"
            return metars[given_code][1]
    url = 'https://aviationweather.gov/adds/dataserver_current/httpparam?dataSource=metars&requestType=retrieve&format=xml&hoursBeforeNow=5&mostRecent=true&stationString=%s' % given_code
    raw_metar_data = requests.get(url).text
    #Try to get code and flight conditions
    #TO--DO: potentially use an XML parser here, rather than regex to improve speed of parsing
    code_data = re.findall(r"<raw_text>(.*)<\/raw_text>", raw_metar_data)
    #Get Flight Condition
    flight_category_data = re.findall(r"<flight_category>(.*)<\/flight_category>", raw_metar_data)
    try:
        code = code_data[0]
    except IndexError:
        #There was nothing found!
        #TO--DO LOGGING: either website is down (perhaps include size of raw_metar_data to see if we even got anything!), or something is wrong
        add_to_log("Critical error in get_METAR method; size of raw_metar_data - %s" % len(raw_metar_data), logging.ERROR)
        return None
    try:
        flight_cat = flight_category_data[0]
    except IndexError:
        # The feed sometimes omits the flight category element.
        flight_cat = "UNKNOWN"
    #Do the work: parse the raw METAR text with the python-metar library.
    try:
        obs = Metar.Metar(code)
    except Metar.ParserError:
        add_to_log('METAR parsing error for airport %s with code "%s"' % (given_code, code), logging.ERROR)
        return None
    #Return dictionary, with some default values filled in
    ret = {"category": flight_cat, "raw_text": code, "clouds": obs.sky_conditions(), "time": None, "wind": 0, "wind_value": 0, "wind_gust_value": 0, \
        "wind_dir": 0, "visibility": 0, "visibility_value": 0, "temp": 0, "temp_value": 0, "altimeter": 0, "sealevelpressure": 0}
    #Build return dictionary; each field is only set when the library parsed it.
    if obs.station_id:
        ret["stationID"] = obs.station_id
    if obs.time:
        ret["time"] = obs.time.ctime()
    if obs.wind_speed:
        ret["wind"] = obs.wind()
        ret['wind_value'] = obs.wind_speed.value()
        if obs.wind_gust:
            ret['wind_gust_value'] = obs.wind_gust.value()
    if obs.wind_dir:
        ret["wind_dir"] = obs.wind_dir.value()
    if obs.vis:
        ret["visibility"] = obs.visibility()
        ret["visibility_value"] = obs.vis.value()
    if obs.temp:
        ret["temp"] = obs.temp.string("C")
        ret["temp_value"] = obs.temp.value()
    if obs.dewpt:
        ret["temp"] += " / " + obs.dewpt.string("C")
    if obs.press:
        ret["altimeter"] = obs.press.string("in")
    if obs.press_sea_level:
        ret["sealevelpressure"] = obs.press_sea_level.string("mb")
    #Cache it (timestamped so the 30-minute freshness check above works)
    ret["cached"] = "False"
    metars[given_code] = (time.time(), ret)
    return ret
def decode_route(route, departure_airport):
    ''' This receives a text route (KSFO WAYNE5 DCT BLAHH J20 BABAB STARR4 KLAX, (-123.22, 37.534)),
    and decodes it into tuples of (name, lat, lon) geographical locations.

    :param route: the filed route string; tokens may be dot- or space-separated
    :param departure_airport: (lat, lon) starting point used to disambiguate
        waypoints that share a name (the nearest candidate wins)
    '''
    ret = []
    if not route:
        return ret
    #set previous waypoint to the departure
    previous_waypoint = departure_airport
    #Some pilots file their routes with periods!
    route = route.replace(".", " ").upper()
    #Loop through the waypoints as they are now all space-separated
    for waypoint in route.split(' '):
        #In case it's not upper case, and get rid of extraneous information
        waypoint = waypoint.split("/")[0]
        #If the waypoint is not 3 or 5 characters, then we should move on.
        if len(waypoint) not in (3, 5):
            continue
        #Look for airways (e.g. A56, B38, etc) and "DCT" and ignore them
        if len(waypoint) == 3:
            if (waypoint[0].isalpha() and not waypoint[1:].isalpha()) or waypoint == "DCT":
                continue
        #Look for waypoint in memory-stored dictionary of waypoints
        #Data structure is ===>> {'Waypoint': [(x, y), (x, y), (x, y)]} #May be one or more location tuples
        candidates = waypoints.get(waypoint)
        if not candidates:
            #This is a waypoint that wasn't found
            #TO-DO LOGGING: log an unknown waypoint?, unless it has "/" in it
            continue
        #Pick the candidate nearest to the previous point on the route.
        closest_index = 0
        closest_dist = haversine(candidates[0][1], candidates[0][0], previous_waypoint[1], previous_waypoint[0])
        for idx, candidate in enumerate(candidates):
            candidate_dist = haversine(previous_waypoint[1], previous_waypoint[0], candidate[1], candidate[0])
            if candidate_dist < closest_dist:
                closest_dist = candidate_dist
                closest_index = idx
        #TO--DO: what is the best candidate waypoint??
        if closest_dist < 1000000:
            ret.append((waypoint, candidates[closest_index][0], candidates[closest_index][1]))
            # BUG FIX: this used to advance with the *last* enumerate index
            # (a shadowed "count" variable) instead of the closest candidate,
            # so routes drifted to the wrong same-named waypoint.
            previous_waypoint = candidates[closest_index]
        #else: the nearest same-named waypoint is implausibly far; skip it
        #TO--DO LOGGING: log the closest candidates that were rejected
    return ret
#Country membership (for determining whether a plane is in a certain country)
#class countries():
# ''' This class represents a country, containing its name, polygonal coordinates, and an 'outer box' containing the
# country in its entirety. The purpose of it is to allow testing of membership of a point within a certain nation '''
#
# def __init__(self, name, data):
# #Name is country name, data is specialized data obtained from Longitude Latitude Dataset for Countries
# #https://fusiontables.google.com/DataSource?docid=1uL8KJV0bMb7A8-SkrIe0ko2DMtSypHX52DatEE4#rows:id=1
# self.name = name
# self.coords = [(float(item.split(',')[0]), float(item.split(',')[1])) for item in data if item]
# self.xmin = min(self.coords, key=lambda x: x[0])[0]
# self.xmax = max(self.coords, key=lambda x: x[0])[0]
# self.ymin = min(self.coords, key=lambda x: x[1])[1]
# self.ymax = max(self.coords, key=lambda x: x[1])[1]
#
# def test_membership(self, x, y):
# #Determines whether given geographical point x, y is in this country
#
# #First we do a rough test to see whether this point is within the outer box (this is a fast calculation)
# if (float(x) >= self.xmin and float(x) <= self.xmax and float(y) >= self.ymin and float(y) <= self.ymax) == False:
# return False
#
# #This is the RAY CASTING method, adapted from :
# #https://stackoverflow.com/questions/36399381/whats-the-fastest-way-of-checking-if-a-point-is-inside-a-polygon-in-python
#
# n = len(self.coords)
# inside = False
#
# x = float(x)
# y = float(y)
#
# p1x, p1y = self.coords[0]
# for i in range(n + 1):
# p2x, p2y = self.coords[i % n]
# if y > min(p1y, p2y):
# if y <= max(p1y, p2y):
# if x <= max(p1x, p2x):
# if p1y != p2y:
# xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
# if p1x == p2x or x <= xints:
# inside = not inside
# p1x, p1y = p2x, p2y
# return inside
#
#with open('countries.txt') as f:
# for line in f.readlines():
# try:
# tmp = line.strip().split('"')
# country = tmp[2].split(",")[-3]
# data = re.sub('<[/]?[A-Za-z]*>', '', tmp[1]).split(',0')
# nations.append(countries(country, data))
# except IndexError:
# continue
#
#def country_by_location(lat, lon):
# ''' This function loops through countries generated above to determine which country contains the given geographical point '''
# lat = float(lat)
# lon = float(lon)
#
# for nation in nations:
## return lat
# #return nation.test_membership()
# if nation.test_membership(lon, lat) == True:
# return nation.name
# return "International territory"
##############################################
### Flask route definitions ###
##############################################
@app.route("/")
def index():
# if not os.environ.get("API_KEY"):
# raise RuntimeError("API_KEY not set")
return render_template("index.html", key="AIzaSyBTK9GUrd7sMxjt6EUlHyN9TXPkqb6R0VA") #os.environ.get("API_KEY"))
@app.route("/update")
def update():
''' This is called by AJAX to return a full update of the plane data '''
#Open file that is continuously updated by cronupdater.py
conn = sqlite3.connect('database.db')
c = conn.cursor()
#Data structure that will be returned;
# holds [ATC, Planes, Centres, administrative data]
jsondata = [[], [], [], []]
#This keeps id mapping for airports and centres as they are parsed
tmp_airport_ids = {}
tmp_centres_ids = {}
#new pilots get assigned this ID
pilot_counter = 0
#Get results from DB; returned as tuples; orderby TYPE (type is ATC or PILOT)
result = c.execute("""SELECT * FROM "onlines" WHERE "latest"=1 ORDER BY 'type'""").fetchall()
#TO--DO LOGGING: if result set length is 0
if len(result) == 0:
pass
#Index map for results from database
atc_indices = {"callsign":2, "cid":3, "name":4, "freq":5, "latitude":6, "longitude":7, "visrange":8, "atismsg":9, "timelogon":10}
ctr_indices = {"callsign":2, "cid":3, "name":4, "freq":5, "visrange":8, "atismsg":9, "timelogon":10}
plane_indices = {"callsign": 2, "cid": 3, "real_name": 4, "latitude": 6, "longitude": 7, "timelogon": 10, "altitude": 12, "speed": 13, "heading": 24, "deptime": 20, "altairport" : 21, \
"aircraft": 14, "tascruise": 15, "depairport" :16, "arrairport": 18, "plannedaltitude": 17, "flighttype": 19, "remarks": 22, "route": 23}
#Loop through the result set
for line in result:
curr_callsign = line[atc_indices["callsign"]].upper()
row_type = line[11] #"pilot" or "atc"
#Check for ATC by Callsign having underscore. There are some logins that say ATC when when piloting...
if row_type == "ATC":
#If normal ATC (ASIA is a known ATC that does not have 'ctr' in name but is really a centre)
if "_" in curr_callsign and not("CTR" in curr_callsign or "OBS" in curr_callsign or "SUP" in curr_callsign) and not(curr_callsign in ["ASIA"]):
#Get icao callsign (SEA --> KSEA)
icao = callsign_to_icao(curr_callsign)
#TO--DO : ppotentially make curr callsign an object
#See if we have run into the ICAO before
if not(icao in tmp_airport_ids):
#New airport! Make a new ID first
new_id = len(jsondata[0])
#Returns some data about airport from database, like latitude, long, altitude, full name
tmp = callsign_to_loc(curr_callsign)
if not(tmp is None):
new_lat, new_long, new_alt, new_name = tmp
else:
#TO--DO LOGGING: We should log the Callsign, becuase these airports are very problematic!!!
new_lat = line[atc_indices["latitude"]]
new_long = line[atc_indices["longitude"]]
new_alt = 0
new_name = line[atc_indices["cid"]]
#ATC_pic is which picture to use for the marker on the front end (it's a sorted concatenation of all available ATC). -1 is simple dot no atc
new_data = {"id" : new_id, "icao": icao, "name": new_name, "longitude": new_long, "latitude": new_lat, "altitude": new_alt, "atc": [], "atc_pic" : "-1", "depplanes": [], "arrplanes": []}
jsondata[0].append(new_data)
#Add to tmp airport directory
tmp_airport_ids[icao] = new_id
else:
#Airport already exists
new_id = tmp_airport_ids[icao]
#Now, lets update the ATC dictionary of airports with current row's data
tmp_atc = {item: line[value] for item, value in atc_indices.items()}
tmp_atc["atctype"] = callsign_to_ATC(curr_callsign)
jsondata[0][new_id]["atc"].append(tmp_atc)
#5 is center which is plotted spearately
jsondata[0][new_id]["atc_pic"] = ''.join(sorted(list({str(item["atctype"]) for item in jsondata[0][new_id]["atc"] if item["atctype"] != 5})))
#ASIA is a known non underscore/'CTR' based centre callsign
elif ("_" in curr_callsign and "CTR" in curr_callsign) or (curr_callsign in ["ASIA"]):
#TO--DO LOGGING - log the newly found center
callsign_initials = curr_callsign.split("_")[0]
tmp_ctr_atc = {item: line[value] for item, value in ctr_indices.items()}
tmp_ctr_atc["atctype"] = 5
#See if centre present or not
if not(callsign_initials in tmp_centres_ids):
#New airport! Make a new ID first
new_id = len(jsondata[1])
#Use ATC-idices because we dont want lat/long in ctr_indices (becuase they are kept at a differnet level that dictionarhy comprehension line)
new_lat = line[atc_indices["latitude"]]
new_lon = line[atc_indices["longitude"]]
#ATC_pic is which picture to use for the marker on the front end (it's a sorted concatenation of all available ATC). -1 is simple dot no atc
ctr_data = {"id": new_id, "icao": callsign_initials, "marker_lat": new_lat, "marker_lon": new_lon, "atc_pic": "0", "atc": [], "polygon": []}
curr_callsign_coords = get_center_coords(callsign_initials)
if curr_callsign_coords is None:
ctr_data["coordinates"] = None
add_to_log('Center found - "%s, not present in database"' % curr_callsign, logging.INFO)
else:
ctr_data["coordinates"] = curr_callsign_coords
jsondata[1].append(ctr_data)
#Add to tmp airport directory
tmp_centres_ids[callsign_initials] = new_id
else:
#Airport already exists
new_id = tmp_centres_ids[callsign_initials]
jsondata[1][new_id]["atc"].append(tmp_ctr_atc)
elif row_type == "PILOT":
#Check for airport, create dummy if needed
#{ callsign cid real_name VATSIMlatitude VATSIMlongitude time_logon altitude groundspeed heading planned_deptime planned_altairport
#planned_aircraft planned_tascruise planned_depairport planned_altitude planned_destairport planned_flighttype planned_remarks planned_route
#Arrivial_apt_id dep_apt_id
#}
#plane_indices = {"callsign": 2, "cid": 3, "real_name": 4, "latitude": 6, "longitude": 7, "timelogon": 10, "altitude": 12, "speed": 13, "heading": 24, "deptime": 20, "altairport" : 21, \
#"aircraft": 14, "tascruise": 15, "depairport" :16, "arrairport": 18, "plannedaltitude": 17, "flighttype": 19, "remarks": 22, "route": 23}
pilot_counter += 1
departure_icao = callsign_to_icao(line[plane_indices["depairport"]])
arrival_icao = callsign_to_icao(line[plane_indices["arrairport"]])
if not(departure_icao in tmp_airport_ids):
#Create dummy airport
dep_new_id = len(jsondata[0])
#Returns some data about airport from database, like latitude, long, altitude, full name
tmp = callsign_to_loc(departure_icao)
if not(tmp is None):
new_lat, new_long, new_alt, new_name = tmp
else:
new_lat = 0
new_long = 0
new_alt = 0
new_name = departure_icao
#TO--DO: log here because airport was not found
#ATC_pic is which picture to use for the marker on the front end (it's a sorted concatenation of all available ATC). -1 is simple dot no atc
new_data = {"id" : dep_new_id, "icao": departure_icao, "name": new_name, "longitude": new_long, "latitude": new_lat, "altitude": new_alt, "atc": [], "atc_pic" : "-1", "depplanes": [], "arrplanes": []}
jsondata[0].append(new_data)
#Add to tmp airport directory
tmp_airport_ids[departure_icao] = dep_new_id
else:
#Airport already exists
dep_new_id = tmp_airport_ids[departure_icao]
#Add this plane to the airport, whether newly created or not
jsondata[0][dep_new_id]["depplanes"].append(pilot_counter)
if not(arrival_icao in tmp_airport_ids):
#Create dummy airport
arr_new_id = len(jsondata[0])
#Returns some data about airport from database, like latitude, long, altitude, full name
tmp = callsign_to_loc(arrival_icao)
if not(tmp is None):
new_lat, new_long, new_alt, new_name = tmp
else:
new_lat = 0
new_long = 0
new_alt = 0
new_name = arrival_icao
#ATC_pic is which picture to use for the marker on the front end (it's a sorted concatenation of all available ATC). -1 is simple dot no atc
new_data = {"id" : arr_new_id, "icao": arrival_icao, "name": new_name, "longitude": new_long, "latitude": new_lat, "altitude": new_alt, "atc": [], "atc_pic" : "-1", "depplanes": [], "arrplanes": []}
jsondata[0].append(new_data)
#Add to tmp airport directory
tmp_airport_ids[arrival_icao] = arr_new_id
else:
#airport already exists
arr_new_id = tmp_airport_ids[arrival_icao]
#Add this plane to the airport, whether newly created or not
jsondata[0][arr_new_id]["arrplanes"].append(pilot_counter)
#Get airline name (eg. BAW ==> British Airways)
airline_name, airline_country, airline_callsign, airline_short, flight_num = decode_airline(curr_callsign)
#add plane to plane list
tmp_pilot = {item: line[value] for item, value in plane_indices.items()}
tmp_pilot["id"] = pilot_counter #JSON id, tjat is reffrerd to by airports!
tmp_pilot["depairport_id"] = dep_new_id
tmp_pilot["arrairport_id"] = arr_new_id
tmp_pilot["airline_name"] = airline_name
tmp_pilot["airline_country"] = airline_country
tmp_pilot["airline_callsign"] = airline_callsign
tmp_pilot["airline_short"] = airline_short
tmp_pilot["airline_flightnum"] = flight_num
# tmp_pilot["current_country"] = country_by_location(tmp_pilot["latitude"], tmp_pilot["longitude"])
#Route is 23
#TO--DO: WORKING!!!
tmp_pilot["detailedroute"] = decode_route(line[23], (jsondata[0][dep_new_id]["latitude"], jsondata[0][dep_new_id]["longitude"]))
jsondata[2].append(tmp_pilot)
#Add admin stuff to final json column
jsondata[3].append({"time_updated": result[0][1], "number_of_records": len(result), "size_bytes": sys.getsizeof(jsondata)})
#sort the ATCs
#TO--DO: this needs to be done in the front end
#jsondata[0] = sorted(jsondata[0], key=lambda x: len(x["atc"]), reverse=True)
#TO--DO: ALSO RETURN A HISTORY OF THIS CALLSIGN+CID (javascript will use this to plot a path!)!!!
#TO--DO: only return releavnt part of map;
#TO--DO: only return updated data rather than everytthing
#TO--DO: ADD {PILOT DATA HERE!!!}
# memo = airports
return jsonify(jsondata)
@app.route("/history")
def history():
''' This recieves a parameter (basically a JSON of either a plane or ATC), and returns an JSON to put with airport data '''
conn = sqlite3.connect('database.db')
c = conn.cursor()
j = dict(request.args)
if j['type'][0] == "ATC":
#Arriving / departing
jsondata = [[], []]
#THis is Airport, so show how many ground, taxxing, within 15 nm
x = """SELECT * FROM 'onlines' WHERE "latest" = '1' AND type = 'PILOT'"""
result = c.execute(x).fetchall()
for row in result:
#If not arriving or departing from the given airport, then move on
if not(j['data[icao]'][0] in [callsign_to_icao(row[18]), callsign_to_icao(row[16])] ): continue
#Calculate distance, speed, altitude
dist = haversine(float(j['data[longitude]'][0]), float(j['data[latitude]'][0]), float(row[7]), float(row[6]))
speed = row[13]
airport_altitude = int(j['data[altitude]'][0])
plane_altitude = row[12]
#create pilot dictionary to be appended
tmp_pilot = {'callsign': row[2], 'cid': row[3], 'altitude': row[12], 'groundspeed': row[13], 'planned_aircraft': row[14], 'planned_tascruise': row[15], \
'planned_depairport': row[16], 'planned_altitude': row[17], 'planned_destairport': row[18], 'planned_deptime': row[20], 'heading': row[24], \
'airline_name': decode_airline(row[2])[0], 'airline_callsign': decode_airline(row[2])[2], 'airline_short': decode_airline(row[2])[3], 'airline_flightnum': decode_airline(row[2])[4], 'id':row[0]}
#Distance from airport
tmp_pilot['distance_from_airport'] = int(dist)
#close by (<10), speed 0 (menaing parked), and on the ground (altitude same)
if dist < 15 and speed == 0 and abs(airport_altitude - plane_altitude) < 50:
status = "In terminal"
#jsondata[0].append(tmp_pilot)
#moving on the ground
elif dist < 15 and speed > 0 and abs(airport_altitude - plane_altitude) < 50:
status = "Taxiing"
#distance is nearby (imminently arriving/departing)
elif dist < 55 and j['data[icao]'][0] == callsign_to_icao(row[18]):
status = "Arriving"
elif dist < 55 and j['data[icao]'][0] == callsign_to_icao(row[16]):
status = "Departing"
elif dist > 55 and speed == 0:
status = "Not yet departed"
elif dist > 55 and speed < 55:
status = "Taxiing"
else:
status = "Enroute"
tmp_pilot['status'] = status
#If arriving
if j['data[icao]'][0] == callsign_to_icao(row[18]):
jsondata[0].append(tmp_pilot)
elif j['data[icao]'][0] == callsign_to_icao(row[16]):
#departing aircraft
jsondata[1].append(tmp_pilot)
return jsonify(jsondata)
elif j['type'][0] == "PLANE":
# [distance from origin, distance to destination], [{time: altitude}], [{time: speed}]
jsondata = []
#DO SQL search - TO--DO: limit this to one day hisotry only or something like that TO--DO: prevetnt database injections!
x = "SELECT * FROM 'onlines' WHERE cid = '%s' AND type = 'PILOT' AND ABS(time_updated - %s) < 50000 ORDER BY time_updated" % (j['cid'][0], time.time())
print(x)
result = c.execute(x).fetchall()
#Do time delta for plotting
orig_time = 0
for row in result:
if orig_time == 0:
orig_time = row[1]
time_delta = abs(orig_time - row[1])
#sending back time_delta altitude speed
jsondata.append([float(time_delta), row[12], row[13]])
return jsonify(jsondata)
@app.route("/metar")
def metar():
''' This route returns JSON of requested METAR '''
#Get the METAR ID'
try:
metarID = dict(request.args)['station'][0]
ret = get_METAR(metarID)
except KeyError:
return jsonify(None)
return jsonify(ret)
@app.route("/worstweather")
def worstweather():
'''Looks through currently online airports, and returns the worst weather '''
#Parse raw airports
try:
worst_weather_airports = dict(request.args)['airports'][0].split(" ")
except KeyError:
#log this; no METAR shouldnt be requested
return jsonify(None)
ret = []
#Loop through airports and get METARs on them, calculate wind, visibility, precipitation, temperature SCORES
for airport in worst_weather_airports:
#No airport given!
if not(airport): continue
#get METAR
curr_metar = get_METAR(airport)
#Check to see if nothing was returned
if curr_metar is None:
continue
#Calculate wind score
curr_wind = (curr_metar['wind_value'] / 10)
try:
curr_wind += (curr_metar['wind_gust_value'] / 10)
except TypeError:
#No wind gust data exist (it is None)
pass
curr_wind = round(curr_wind, 1)
#Calculate visibility score
if 'meter' in curr_metar['visibility']:
#convert to feet; in metric units
#If it's 10000, then it's really like 10 miles, so assume it's 10 miles (formula below) is normailzed for 10 miles
#that is: "10 miles" and "10,000 meters" are functionally equivalent
if curr_metar['visibility_value'] == 10000:
visibility_feet = 5280 * 10
else:
visibility_feet = curr_metar['visibility_value'] * 3.28
else:
#in miles!
visibility_feet = curr_metar['visibility_value'] * 5280
#Score
curr_visi = round((visibility_feet * (-5 / 53000)) + 5, 1)
#Calculate Temperature Score
if 0 <= curr_metar['temp_value'] <= 25:
#Mild weather, score is 0
curr_temp = 0
elif curr_metar['temp_value'] < -20 or curr_metar['temp_value'] > 40:
#Extreme weather, score is 5
curr_temp = 5
elif -20 <= curr_metar['temp_value'] <= 0:
curr_temp = round((curr_metar['temp_value'] * (-5 / 20)) + 5, 1)
elif 25 < curr_metar['temp_value'] <= 40:
curr_temp = round((curr_metar['temp_value'] * (5 / 15)) - 8.33333, 1)
#Calculate precipitation score
curr_weather = 0
curr_weather_remark = ""
precip_dict = {'DZ' : ('Drizzle', 1), 'RA' : ('Rain', 3), 'SN' : ('Snow', 5), 'SG' : ('Snow Grains', 3), 'IC' : ('Ice', 3.5), \
'PL' : ('Ice Pellets', 3), 'GR' : ('Hail', 4.5), 'GS' : ('Small Hail', 3.5)}
for item, value in precip_dict.items():
if item in curr_metar['raw_text']:
curr_weather += value[1]
curr_weather_remark += " " + value[0]
if curr_weather_remark == "": curr_weather_remark = "None"
#Build return
ret.append({'airport': airport, 'wind_score': curr_wind, 'wind': curr_metar['wind'], 'visibility_score': curr_visi, 'visibility': curr_metar['visibility'], \
'precipitation_score': curr_weather, 'precipitation': curr_weather_remark, 'temperature_score': curr_temp, 'temperature': curr_metar['temp'], \
'total_score': round(curr_wind + curr_visi + curr_weather + curr_temp, 1)})
return jsonify(ret)
| {
"repo_name": "sabidhasan/VATSee",
"path": "application.py",
"copies": "1",
"size": "38439",
"license": "mit",
"hash": 4196643161280457000,
"line_mean": 43.4895833333,
"line_max": 216,
"alpha_frac": 0.5856031635,
"autogenerated": false,
"ratio": 3.5113729788983283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559170070777934,
"avg_score": 0.007561214324078784,
"num_lines": 864
} |
api_key = ""
headers = {}
prev_response = {}
_base = 'http://api.football-data.org'
endpoints = {
'fixture': _base + '/v1/fixtures/{}',
'all_fixtures': _base + '/v1/fixtures/',
'competition': _base + '/v1/competitions/{}',
'all_competitions': _base + '/v1/competitions/',
'comp_teams': _base + '/v1/competitions/{}/teams',
'comp_fixtures': _base + '/v1/competitions/{}/fixtures',
'team': _base + '/v1/teams/{}',
'team_players': _base + '/v1/teams/{}/players',
'team_fixtures': _base + '/v1/teams/{}/fixtures/',
'league_table': _base + '/v1/competitions/{}/leagueTable'
}
def update_prev_response(r, endpoint):
    """ Sets the prev_response attribute to contain a dict that includes
        the response status code and headers of the most recent HTTP
        request.

        Arguments:
        r -- The response object (of the latest HTTP request).
        endpoint -- The endpoint used (in the latest HTTP request).
    """
    global prev_response
    # Copy the headers rather than aliasing them: the original code assigned
    # r.headers directly, so the extra keys added below mutated the response
    # object itself. copy() keeps the mapping type (e.g. CaseInsensitiveDict)
    # so case-insensitive lookups by callers still work.
    prev_response = r.headers.copy()
    prev_response['Status-Code'] = r.status_code
    prev_response['Endpoint'] = endpoint
| {
"repo_name": "xozzo/pyfootball",
"path": "pyfootball/globals.py",
"copies": "1",
"size": "1116",
"license": "mit",
"hash": 8902392602932061000,
"line_mean": 33.875,
"line_max": 72,
"alpha_frac": 0.6155913978,
"autogenerated": false,
"ratio": 3.455108359133127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45706997569331265,
"avg_score": null,
"num_lines": null
} |
APIKEY = ''  # Google Cloud Vision API key (required before running)
OUTPUT_VIDEO = '.avi'  # Path of the annotated output video (AVI container)
INPUT_VIDEO = ''  # Path of the input video (mov, avi, mp4, etc)
##
# Process video
##
import cv2
import io
import os
import requests
import base64
import json
import sys
# Function to make output text more readable
# Function to make output text more readable: map a Vision API likelihood
# enum value to a short human-readable adverb.
def makeLikelyText(i):
    likelihood_words = {
        "VERY_LIKELY": "absolutly",
        "LIKELY": "probably",
        "POSSIBLE": "maybe",
        "UNLIKELY": "probably not",
    }
    # Anything else (including VERY_UNLIKELY) reads as "not".
    return likelihood_words.get(i, "not")
# Load the input video
vidcap = cv2.VideoCapture(INPUT_VIDEO)
# Set output codec (Motion-JPEG). The cv2.cv namespace means this script
# targets OpenCV 2.x / Python 2.
fourcc = cv2.cv.CV_FOURCC('M', 'J', 'P', 'G')
# Create the output video with the same FPS and frame size as the input
outputvid = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, int(vidcap.get(cv2.cv.CV_CAP_PROP_FPS)), (int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)), int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))), True)
# Count the number of frames processed so far
count = 0
# Load success?
success = True
# Run through all frames
while success:
    success, image = vidcap.read()
    # NOTE(review): when read() fails (end of video) image is None, and the
    # imwrite below runs before the loop condition is re-checked - confirm
    # Print current status
    print('Process frame: ', count, ' of ', int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)))
    # Save the frame to a temp jpg file so it can be base64-encoded for the API
    cv2.imwrite("__tmp.jpg", image)
    # Open the jpg file for base64 encoding
    with open("__tmp.jpg", "rb") as temp_image_file:
        encoded_image = base64.b64encode(temp_image_file.read())
    # Remove the file, it is not needed anymore
    os.remove("__tmp.jpg")
    # Query the Vision API only on even frames; odd frames reuse the previous
    # frame's annotations (halves the number of API calls).
    # Face detection:
    if (count % 2 == 0):
        RESTdata = '{ "requests": [{"image":{"content": "' + encoded_image + '" },"features": [{"type":"FACE_DETECTION"}]}] }'
        RESTresponse = requests.post("https://vision.googleapis.com/v1/images:annotate?key=" + APIKEY, data=RESTdata)
        RESTresultFaces = RESTresponse.json()
    # Label detection:
    if (count % 2 == 0):
        RESTdata = '{ "requests": [{"image":{"content": "' + encoded_image + '" },"features": [{"type":"LABEL_DETECTION"}]}] }'
        RESTresponse = requests.post("https://vision.googleapis.com/v1/images:annotate?key=" + APIKEY, data=RESTdata)
        RESTresultLabels = RESTresponse.json()
    # Text (OCR) detection:
    if (count % 2 == 0):
        RESTdata = '{ "requests": [{"image":{"content": "' + encoded_image + '" },"features": [{"type":"TEXT_DETECTION"}]}] }'
        RESTresponse = requests.post("https://vision.googleapis.com/v1/images:annotate?key=" + APIKEY, data=RESTdata)
        RESTresultText = RESTresponse.json()
    # Draw the detected text: outline each bounding polygon in magenta and
    # print the recognized text next to it
    try:
        if 'textAnnotations' in RESTresultText['responses'][0]:
            for text in RESTresultText['responses'][0]['textAnnotations']:
                # Draw the four edges of the bounding polygon
                cv2.line(image, (text['boundingPoly']['vertices'][0]['x'], text['boundingPoly']['vertices'][0]['y']), (text['boundingPoly']['vertices'][1]['x'], text['boundingPoly']['vertices'][1]['y']), (255, 0, 255))
                cv2.line(image, (text['boundingPoly']['vertices'][1]['x'], text['boundingPoly']['vertices'][1]['y']), (text['boundingPoly']['vertices'][2]['x'], text['boundingPoly']['vertices'][2]['y']), (255, 0, 255))
                cv2.line(image, (text['boundingPoly']['vertices'][2]['x'], text['boundingPoly']['vertices'][2]['y']), (text['boundingPoly']['vertices'][3]['x'], text['boundingPoly']['vertices'][3]['y']), (255, 0, 255))
                cv2.line(image, (text['boundingPoly']['vertices'][3]['x'], text['boundingPoly']['vertices'][3]['y']), (text['boundingPoly']['vertices'][0]['x'], text['boundingPoly']['vertices'][0]['y']), (255, 0, 255))
                # Add the recognized text next to its polygon
                cv2.putText(image, text['description'], (text['boundingPoly']['vertices'][0]['x'], text['boundingPoly']['vertices'][0]['y']), cv2.FONT_HERSHEY_PLAIN, 1.5, (255,0,255), 2)
    except:
        pass # pass if something misses in this frame
    # Overlay the label annotations (scene descriptions) at the top of the frame
    try:
        if 'labelAnnotations' in RESTresultLabels['responses'][0]:
            labels = list()
            for label in RESTresultLabels['responses'][0]['labelAnnotations']:
                label_final = label['description'] + ' (' + str(int((label['score']*100))) + '%) - '
                labels.append(label_final[:-2])
            # Sort alphabetically for a stable display order
            labels.sort()
            cv2.putText(image, ' '.join(labels), (4, 42), cv2.FONT_HERSHEY_PLAIN, 2, (0,0,255), 2)
    except:
        pass # pass if something misses in this frame
    # Loop through all the found faces
    try:
        if 'faceAnnotations' in RESTresultFaces['responses'][0]:
            for singleFace in RESTresultFaces['responses'][0]['faceAnnotations']:
                # Draw the four edges of the face bounding polygon in green
                cv2.line(image, (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']), (singleFace['boundingPoly']['vertices'][1]['x'], singleFace['boundingPoly']['vertices'][1]['y']), (0, 255, 0))
                cv2.line(image, (singleFace['boundingPoly']['vertices'][1]['x'], singleFace['boundingPoly']['vertices'][1]['y']), (singleFace['boundingPoly']['vertices'][2]['x'], singleFace['boundingPoly']['vertices'][2]['y']), (0, 255, 0))
                cv2.line(image, (singleFace['boundingPoly']['vertices'][2]['x'], singleFace['boundingPoly']['vertices'][2]['y']), (singleFace['boundingPoly']['vertices'][3]['x'], singleFace['boundingPoly']['vertices'][3]['y']), (0, 255, 0))
                cv2.line(image, (singleFace['boundingPoly']['vertices'][3]['x'], singleFace['boundingPoly']['vertices'][3]['y']), (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']), (0, 255, 0))
                # Stack the emotion/headwear descriptions above the face, 25px apart
                cv2.putText(image, makeLikelyText(singleFace['joyLikelihood']) + " joyful", (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,255,0), 2)
                cv2.putText(image, makeLikelyText(singleFace['angerLikelihood']) + " angry", (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']-25), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,255,0), 2)
                cv2.putText(image, makeLikelyText(singleFace['surpriseLikelihood']) + " surprised", (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']-50), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,255,0), 2)
                cv2.putText(image, makeLikelyText(singleFace['sorrowLikelihood']) + " sorrow", (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']-75), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,255,0), 2)
                cv2.putText(image, makeLikelyText(singleFace['headwearLikelihood']) + " wearing a hat", (singleFace['boundingPoly']['vertices'][0]['x'], singleFace['boundingPoly']['vertices'][0]['y']-100), cv2.FONT_HERSHEY_PLAIN, 1.5, (0,255,0), 2)
                # Label selected facial landmarks (nose, eyes, etc).
                # NOTE(review): 'RIGTH_EYE_PUPIL' is misspelled (the API uses
                # RIGHT_EYE_PUPIL), so the right pupil is never labelled - confirm
                for landmark in singleFace['landmarks']:
                    if landmark['type'] in ['MOUTH_CENTER', 'NOSE_TIP', 'LEFT_EYE_PUPIL', 'RIGTH_EYE_PUPIL', 'FOREHEAD_GLABELLA']:
                        cv2.putText(image, landmark['type'], (int(landmark['position']['x']), int(landmark['position']['y'])), cv2.FONT_HERSHEY_PLAIN, 1, (255,0,0), 1)
    except:
        pass # pass if something misses in this frame
    # Write the (annotated) frame to the output video
    outputvid.write(image)
    # Counter for frames in movie +1
    count += 1
    # Flush stdout so progress is visible immediately
    sys.stdout.flush()
    # Set this if you want the video to render prematurely
    if count == 3341:
        break
# Wrap everything up
cv2.destroyAllWindows()
outputvid.release()
| {
"repo_name": "tzmartin/google-vision",
"path": "sentiment.py",
"copies": "1",
"size": "9307",
"license": "mit",
"hash": -2190967862008114700,
"line_mean": 47.2227979275,
"line_max": 248,
"alpha_frac": 0.5193940045,
"autogenerated": false,
"ratio": 3.941973739940703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4961367744440703,
"avg_score": null,
"num_lines": null
} |
#--- API LICENSE CONFIGURATION -----------------------------------------------------------------------
# Default license keys used by pattern.web.SearchEngine to contact different API's.
# Google and Yahoo are paid services for which you need a personal license + payment method.
# The default Google license is for testing purposes (100 daily queries).
# Twitter, Wikipedia and Facebook are free.
# Bing, Flickr and ProductsWiki use licenses shared among all Pattern users.
# Mapping of service name -> API license key (or OAuth credential tuple).
# NOTE: the name "license" shadows the site builtin; kept as the public API.
license = {}
license["Google"] = "AIzaSyBxe9jC4WLr-Rry_5OUMOZ7PCsEyWpiU48"
license["Bing"] = "VnJEK4HTlntE3SyF58QLkUCLp/78tkYjV1Fl3J7lHa0="
license["Yahoo"] = ("", "") # OAuth consumer key + consumer secret.
license["Twitter"] = ("p7HUdPLlkKaqlPn6TzKkA", # OAuth (key, secret, token)
    "R7I1LRuLY27EKjzulutov74lKB0FjqcI2DYRUmsu7DQ", (
    "14898655-TE9dXQLrzrNd0Zwf4zhK7koR5Ahqt40Ftt35Y2qY",
    "q1lSRDOguxQrfgeWWSJgnMHsO67bqTd5dTElBsyTM"))
license["Facebook"] = "332061826907464|jdHvL3lslFvN-s_sphK1ypCwNaY"
license["Wikipedia"] = None  # free; no key required
license["Flickr"] = "787081027f43b0412ba41142d4540480"
license["Products"] = "64819965ec784395a494a0d7ed0def32"
| {
"repo_name": "decebel/dataAtom_alpha",
"path": "bin/plug/py/external/pattern/web/api.py",
"copies": "1",
"size": "1207",
"license": "apache-2.0",
"hash": -291462509342432830,
"line_mean": 62.5263157895,
"line_max": 102,
"alpha_frac": 0.6818558409,
"autogenerated": false,
"ratio": 2.80046403712297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8929316641599101,
"avg_score": 0.010600647284773616,
"num_lines": 19
} |
#--- API LICENSE CONFIGURATION -------------------------------------------
# Default license keys used by pattern.web.SearchEngine to contact different API's.
# Google and Yahoo are paid services for which you need a personal license + payment method.
# The default Google license is for testing purposes (= 100 daily queries).
# Wikipedia, Twitter and Facebook are free.
# Bing, Flickr and ProductsWiki use licenses shared among all Pattern users.
# Mapping of service name -> API license key (or OAuth credential tuple).
# NOTE: the name "license" shadows the site builtin; kept as the public API.
license = {}
license["Google"] = \
    "AIzaSyBxe9jC4WLr-Rry_5OUMOZ7PCsEyWpiU48"
license["Bing"] = \
    "VnJEK4HTlntE3SyF58QLkUCLp/78tkYjV1Fl3J7lHa0="
license["Yahoo"] = \
    ("", "") # OAuth (key, secret)
license["DuckDuckGo"] = \
    None  # free; no key required
license["Wikipedia"] = \
    None  # free; no key required
license["Twitter"] = (
    "p7HUdPLlkKaqlPn6TzKkA", # OAuth (key, secret, token)
    "R7I1LRuLY27EKjzulutov74lKB0FjqcI2DYRUmsu7DQ", (
    "14898655-TE9dXQLrzrNd0Zwf4zhK7koR5Ahqt40Ftt35Y2qY",
    "q1lSRDOguxQrfgeWWSJgnMHsO67bqTd5dTElBsyTM"))
license["Facebook"] = \
    "332061826907464|jdHvL3lslFvN-s_sphK1ypCwNaY"
license["Flickr"] = \
    "787081027f43b0412ba41142d4540480"
license["ProductWiki"] = \
    "64819965ec784395a494a0d7ed0def32"
| {
"repo_name": "hayd/pattern",
"path": "pattern/web/api.py",
"copies": "3",
"size": "1187",
"license": "bsd-3-clause",
"hash": 1263896320851274500,
"line_mean": 31.0810810811,
"line_max": 92,
"alpha_frac": 0.700084246,
"autogenerated": false,
"ratio": 2.5637149028077753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9758935529428028,
"avg_score": 0.0009727238759496826,
"num_lines": 37
} |
# API-like access to St Andrews MMS.
# Useful for things like grade scrapers and so on.
# I realised my old code was dated, unpythonic, and according to Lenary "horrible",
# so it was probably best to start again.
# Also, interesting to get my head out of functional-land every once in a while :)
from bs4 import BeautifulSoup
import htmllib
import re
import requests
import time
import json
import sys
import urllib
# Exceptions.
class ImproperUseError(Exception):
    """Raised when the library is used incorrectly by the caller."""

    def __init__(self, msg):
        super(ImproperUseError, self).__init__()
        self.msg = msg

    def __repr__(self):
        return self.msg
class AuthenticationError(Exception):
    """Raised when the supplied MMS credentials are rejected."""

    def __init__(self):
        super(AuthenticationError, self).__init__()

    def __repr__(self):
        return "The given username or password was incorrect."
class ToolNotAvailableError(Exception):
    """Raised when a requested MMS tool is not available for a module."""

    def __init__(self, msg):
        super(ToolNotAvailableError, self).__init__()
        self.msg = msg

    def __repr__(self):
        return self.msg
class CourseworkNotAvailableError(Exception):
    """Raised when a submission download is requested but nothing was submitted."""

    def __init__(self):
        super(CourseworkNotAvailableError, self).__init__()

    def __repr__(self):
        return "No coursework to download!"
# MMS Tools (at least the student ones. I'm not staff. If you're reading this,
# Tristan, patches welcome ;))
# TODO: Python 2 doesn't have enums... There must be a nicer way of doing this
class MMSToolType(object):
    """Integer constants for the tool kinds MMS attaches to modules."""

    Attendance, Content, Coursework, Enrollment, Moodle, \
        Signup, URL, Invalid = range(8)

    @staticmethod
    def from_string(tool_str):
        """Map an MMS tool identifier (as used in link CSS classes) to a
        tool-type constant; unknown identifiers map to Invalid."""
        lookup = {
            "coursework": MMSToolType.Coursework,
            "tas": MMSToolType.Attendance,
            "Enrollment": MMSToolType.Enrollment,
            "URL": MMSToolType.URL,
            "content": MMSToolType.Content,
            "signup": MMSToolType.Signup,
            "moodlelink": MMSToolType.Moodle,
        }
        return lookup.get(tool_str, MMSToolType.Invalid)

    @staticmethod
    def show_string(tool_type):
        """Return a human-readable name for a tool-type constant."""
        names = {
            MMSToolType.Attendance: "Attendance",
            MMSToolType.Content: "Content",
            MMSToolType.Coursework: "Coursework",
            MMSToolType.Enrollment: "Enrollment",
            MMSToolType.Moodle: "Moodle Link",
            MMSToolType.Signup: "Signup",
            MMSToolType.URL: "URL",
        }
        return names.get(tool_type, "Invalid Tool")
class MMSTool(object):
    """Generic handle on a single MMS tool attached to a module."""

    def __init__(self, name, tool_type, url, lib):
        """Record the tool's display name, MMSToolType constant, absolute
        URL and the owning MMSLib instance used for requests."""
        self.name = name
        self.tool_type = tool_type
        self.url = url
        self.lib = lib
class MMSCourseworkTool(MMSTool):
    """Coursework tool: knows how to list a module's assignments."""

    def __init__(self, name, url, lib):
        MMSTool.__init__(self, name, MMSToolType.Coursework, url, lib)

    def get_assignments(self):
        """Fetch the coursework page and parse it into MMSAssignment objects."""
        page = self.lib._mms_get(self.url)
        return _parse_cwk(page, self.url, self.lib)
# Representation of an MMS Module
class MMSModule(object):
    """A single MMS module: code, display name, semester and its tools."""

    def __init__(self, module_code, module_name, semester, tools):
        """Store module metadata; *tools* is a list of MMSTool objects."""
        self.module_code = module_code
        self.module_name = module_name
        self.semester = semester
        self.tools = tools

    def get_tools(self, tool_ty=None):
        """Return this module's tools, optionally filtered by tool type.

        :param tool_ty: an MMSToolType constant, or None for all tools.
        :return: a list of tools.

        Fixes: ``is None`` instead of ``== None``, and a list comprehension
        instead of ``filter`` — on Python 3 ``filter`` returns a lazy
        iterator, while callers of this Python-2-era API expect a list.
        """
        if tool_ty is None:
            return self.tools
        return [tool for tool in self.tools if tool.tool_type == tool_ty]
class MMSFeedback(object):
    """One piece of coursework feedback: sender, date, optional comment
    text and optional attached-file URL."""

    def __init__(self, name, date, content, file_url):
        self.name = name          # sender's display name
        self.date = date          # time.struct_time of the feedback
        self.content = content    # textual comment, may be None
        self.file_url = file_url  # attached feedback file URL, may be None

    def __repr__(self):
        when = time.strftime("%d %b %y, %H:%M", self.date)
        text = "Feedback from " + self.name + " on " + when + ": \n"
        if self.file_url is not None:
            text = text + "Feedback file URL: " + self.file_url
        if self.content is not None:
            text = text + self.content
        return text

    def __str__(self):
        # NOTE: returns UTF-8 bytes (a Python 2 idiom).
        return self.__repr__().encode("utf-8", "ignore")
class MMSAssignment(object):
    """A coursework assignment parsed from an MMS coursework table.

    Dates are ``time.struct_time`` values; grade/weighting are floats or
    None; feedback is fetched lazily via stored URLs.
    """

    def __init__(self, id, name, due_date, feedback_date, submitted_date,
                 submission_url, feedback_urls, grade, weighting, chart_link,
                 lib):
        """Store assignment metadata.

        :param id: numeric MMS assignment id.
        :param name: assignment title.
        :param due_date: submission deadline.
        :param feedback_date: date feedback is due back.
        :param submitted_date: when the student submitted, or None.
        :param submission_url: URL of the uploaded file, or None.
        :param feedback_urls: list of JSON feedback URLs (fetched lazily).
        :param grade: recorded grade, or None.
        :param weighting: percentage weighting, or None.
        :param chart_link: grade-distribution chart URL, or None.
        :param lib: owning MMSLib used for authenticated requests.
        """
        self.id = id
        self.name = name
        self.due_date = due_date
        self.feedback_date = feedback_date
        self.submitted_date = submitted_date
        self.submission_url = submission_url
        self._feedback_urls = feedback_urls
        self.grade = grade
        self.weighting = weighting
        # Bug fix: chart_link was accepted but never stored, silently
        # dropping the chart URL that _parse_cwk extracts.
        self.chart_link = chart_link
        self._lib = lib

    def __repr__(self):
        ret = ["------ Assignment %s -------" % self.name,
               "ID: %s" % self.id,
               "Due date: %s" % time.strftime("%d %b %y, %H:%M", self.due_date),
               "Feedback date: %s" % time.strftime("%d %b %y", self.feedback_date)]
        if self.submitted_date is not None:
            ret.append("Submitted date: %s" %
                       time.strftime("%d %b %y, %H:%M", self.submitted_date))
            ret.append("Uploaded file URL: %s" % self.submission_url)
        else:
            ret.append("Not submitted")
        if self.grade is not None:
            ret.append("Grade: %f" % self.grade)
        else:
            ret.append("No grade recorded")
        if self.weighting is not None:
            ret.append("Weighting: %f" % self.weighting)
        else:
            ret.append("Not weighted")
        return "\n".join(ret)

    def __str__(self):
        # NOTE: returns UTF-8 bytes (a Python 2 idiom).
        return self.__repr__().encode("utf-8", "ignore")

    def get_feedback(self):
        """Fetch all feedback entries and return them as a list of
        MMSFeedback.

        Fix: the previous ``map`` call is lazy on Python 3; a list
        comprehension gives a re-iterable, len()-able list on both
        versions.
        """
        return [_fetch_feedback(u, self._lib) for u in self._feedback_urls]

    def download_submission(self):
        """Download the submitted file via the owning MMSLib.

        :raises CourseworkNotAvailableError: when nothing was submitted.
        """
        if self.submission_url is None:
            raise CourseworkNotAvailableError
        return self._lib._mms_download(self.submission_url)
# Accesses are stateful, so we need a class to encapsulate this
class MMSLib(object):
    """Stateful HTTP client for MMS.

    Owns a ``requests`` session (holding SSO cookies), performs login on
    demand when a fetched page turns out to be the login screen, and
    exposes raw page-fetch / file-download helpers used by the parsers.
    """

    # All URLs in MMS are relative, which isn't much use to us!
    BASE_URL = "https://mms.st-andrews.ac.uk"
    LOGIN_URL = "https://login.st-andrews.ac.uk"
    # Marker substrings used to recognise SSO responses:
    # INCORRECT_TEXT appears on a failed login, NOT_LOGGED_IN_TEXT on the
    # login form shown when the session is unauthenticated.
    INCORRECT_TEXT = "cannot be determined to be authentic"
    NOT_LOGGED_IN_TEXT = "Log in here with your"

    def __init__(self, user, passwd):
        """Store credentials and log in eagerly (network side effect).

        The initial fetch of the user's Modules page triggers the SSO
        login flow and populates the session cookies.
        """
        self.user = user
        self.passwd = passwd
        self.sess = requests.Session()
        user_home = MMSLib.BASE_URL + "/mms/user/me/Modules"
        self._mms_get(user_home)

    # Attempts to log in. Throws an AuthenticationError if incorrect,
    # otherwise returns page shown upon successful login
    def _login(self, login_page):
        # Get the required hidden metadata for the SSO system
        parsed_login = _parse_login(login_page)
        args = {"username": self.user, "password": self.passwd,
                "lt": parsed_login["lt"], "_eventId": parsed_login["eventid"]}
        # Make the login request
        req_url = MMSLib.LOGIN_URL + "/" + parsed_login["dest"]
        resp = self.sess.post(req_url, data=args)
        # If login failure, then throw an error
        if MMSLib.INCORRECT_TEXT in resp.text:
            raise AuthenticationError()
        return resp.text

    # Stateful get access, handles login if necessary
    def _mms_get(self, req_url):
        """Fetch *req_url*, logging in first if the session has expired."""
        resp = self.sess.get(req_url)
        if MMSLib.NOT_LOGGED_IN_TEXT in resp.text:
            # NOTE(review): the post-login page is returned verbatim,
            # skipping the ASCII/whitespace clean-up below -- confirm that
            # callers tolerate this inconsistency.
            return self._login(resp.text)
        # Force the page down to ASCII, dropping anything else.
        # NOTE(review): on Python 3 encode() yields bytes and the str
        # replace() below would raise -- this code assumes Python 2.
        html = resp.text.encode("ascii", "ignore")
        # NOTE(review): the argument below appears to be a (possibly
        # mangled) non-breaking-space character -- verify against the
        # original source before relying on it.
        html = html.replace(" ", "")
        return html

    # Adapted from http://stackoverflow.com/questions/16694907/
    # how-to-download-large-file-in-python-with-requests-py
    def _mms_download(self, url):
        """Stream *url* into a file in the working directory.

        The filename comes from the Content-Disposition header when
        present, otherwise from the last URL path segment.
        """
        # Default to the filename...
        local_filename = url.split('/')[-1]
        r = self.sess.get(url, stream=True)
        # But if we can, get the nice name instead :)
        if "content-disposition" in r.headers:
            content_disp = r.headers.get("content-disposition")
            local_filename = content_disp.split("filename=")[1].replace("\"", "")
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()

    # Gets a list of MMSModules.
    # If academic_year is None, the current year is fetched.
    def get_modules(self, academic_year=None):
        """Fetch and parse the module list for a year (e.g. "2013_4")."""
        # Example: .../Modules?academic_year=2011%2F2
        req_url = MMSLib.BASE_URL + "/mms/user/me/Modules?academic_year="
        if academic_year != None:
            # Years arrive as "2013_4"; MMS wants "2013/4", URL-encoded.
            academic_year = academic_year.replace("_", "%2F")
            req_url = req_url + academic_year
        req_url = req_url + "&unit=&command=Get+My+Modules"
        res = self._mms_get(req_url)
        modules = _parse_modules_list(res, self)
        return modules

    def get_module(self, academic_year, module_code):
        # TODO. Will require going to module page, and translating
        # from textual reps of tools to actual tools, parsing a different table
        pass
def _parse_modules_list(html, lib):
    """Given a module overview page, parse it into a list of MMSModule."""
    soup = BeautifulSoup(html)
    headings = soup.findAll("h3", {"class": "module_heading"})
    modules = []
    for heading in headings:  # one heading per module
        # The heading link encodes the module metadata, e.g.
        # /mms/module/2013_4/Y1/CS4099/
        href = heading.a["href"]
        match = re.search("/mms/module/(.+)/(.+)/(.+)/", href)
        if not match:
            continue
        academic_year, semester, code = match.groups()
        title = heading.a.contents[0]
        # Then parse the tools attached to the heading.
        tools = _parse_module_tools(heading, lib)
        modules.append(MMSModule(code, title, semester, tools))
    return modules
def _parse_module_tools(dom_entry, lib):
    """Extract the tool links attached to one module heading element."""
    tools = []
    # The resources list lives two siblings after the heading (whitespace
    # text nodes count as siblings in BeautifulSoup).
    section = dom_entry.next_sibling.next_sibling
    resources = section.find("ul", {"class": "module_resources"})
    if not resources:
        return tools
    for anchor in resources.find_all("a"):
        # The second CSS class on the link identifies the tool kind.
        kind = MMSToolType.from_string(anchor["class"][1])
        href = MMSLib.BASE_URL + anchor["href"]
        title = anchor.contents[0]
        # TODO: Once we add support for more tools, it's likely that it'd be
        # best to have subclasses for each tool. For now, this will do...
        if kind == MMSToolType.Coursework:
            tools.append(MMSCourseworkTool(title, href, lib))
        else:
            tools.append(MMSTool(title, kind, href, lib))
    return tools
def _parse_login(html):
    """Parse the SSO login page.

    Returns a dict with keys: ``id`` (form id), ``dest`` (form action URL),
    ``lt`` (hidden "lt" value) and ``eventid`` (hidden "_eventId" value).
    """
    soup = BeautifulSoup(html)
    form = soup.find("form")

    def hidden(name):
        # Value of the hidden <input> with the given name.
        return form.find("input", {"type": "hidden", "name": name})["value"]

    return {
        "id": form["id"],
        "dest": form["action"],
        "lt": hidden("lt"),
        "eventid": hidden("_eventId"),
    }
def is_float(test_str):
    """Return True if *test_str* can be parsed as a float, else False."""
    try:
        float(test_str)
    except ValueError:
        return False
    else:
        return True
def _parse_cwk(html, url, lib):
    """Parse a coursework-tool page into a list of MMSAssignment.

    Relies on the fixed column order of the MMS coursework table:
    0 name, 1 due date, 2 feedback date, 3 submitted-file link,
    4 submission date, 5 feedback links, 6 grade, 7 weighting,
    8 chart link, 9 hidden input carrying the assignment id.
    """
    ret = []
    parser = BeautifulSoup(html)
    table = parser.find("tbody")
    entries = table.findAll("tr")  # one row per assignment
    for entry in entries:
        children = entry.findAll("td")  # the row's cells, in column order
        name = children[0].contents[0]
        # Due date, e.g. "30 Sep 10, 23:59"
        due_date_str = children[1].contents[0]
        due_date = time.strptime(due_date_str, "%d %b %y, %H:%M")
        # Feedback date, e.g. "07 Oct 10"
        feedback_date_str = children[2].contents[0]
        feedback_date = time.strptime(feedback_date_str, "%d %b %y")
        # Link to the uploaded submission, if any.
        file_url_field = children[3]
        file_url = None
        if file_url_field.a != None:
            file_url = url + file_url_field.a["href"]
        # Submission date (same format as due date). Not always present...
        if len(children[4].contents) > 0:
            submitted_date_str = children[4].contents[0]
            try:
                submitted_date = time.strptime(submitted_date_str, "%d %b %y, %H:%M")
            except ValueError:  # Generally happens if not submitted
                submitted_date = None
        else:
            submitted_date = None
        # Feedback URLs are collected lazily; see _parse_cwk_feedback_field.
        feedback = _parse_cwk_feedback_field(children[5], url)
        # Parse grade (left as None when empty or non-numeric).
        grade = None
        if len(children[6].contents) > 0:
            grade_str = children[6].contents[0]
            if is_float(grade_str):
                grade = float(grade_str)
        # Parse weighting, displayed as e.g. "20 %".
        weighting_str = children[7].contents[0]
        weighting = None
        try:
            weighting_regex = "(\d*) %"
            match = re.search(weighting_regex, weighting_str)
            if match:
                weighting = float(match.group(1))
        except ValueError:
            weighting = None
        # Parse chart link. Some modules might not have charts enabled...
        chart_link = None
        if children[8].a != None:
            chart_link = str(children[8].a["href"])
        # The assignment id travels in a hidden <input> in the last cell.
        id = int(children[9].input["value"])
        assignment = MMSAssignment(id, str(name), due_date, feedback_date,
                                   submitted_date, file_url, feedback, grade,
                                   weighting, chart_link, lib)
        ret.append(assignment)
    return ret
# The feedback field gives us a URL to the feedback. Adding the parameter
# template_format=application/json gives us the data in a nice JSON format.
# URLs are stored rather than fetched so feedback is only downloaded on
# demand (a module such as RPIC / CS1002 may have ~10 entries).
def _parse_cwk_feedback_field(dom_element, url):
    """Collect the feedback URLs from one coursework table cell."""
    urls = []
    listing = dom_element.find("ul", {"class": "horizontal"})
    for item in listing.find_all("li"):
        anchor = item.a
        # Skip empty entries and the "[Add Comment]" action link.
        if anchor is None or anchor.contents[0] == "[Add Comment]":
            continue
        urls.append(url + anchor["href"] + "&template_format=application/json")
    return urls
# Woo, if only all of MMS had a JSON API! Would make my life easier :)
def _fetch_feedback(feedback_url, lib):
    """Fetch one JSON feedback entry and return it as an MMSFeedback."""
    raw = lib._mms_get(feedback_url)
    # The JSON MMS emits is not valid: some characters are escaped with a
    # backslash when they should not be, and genuine escapes use only one
    # backslash. Work around it by protecting \" pairs with a placeholder,
    # doubling the remaining backslashes, then restoring the placeholder.
    cleaned = raw.replace("\\\"", "<<quote>>")
    cleaned = cleaned.replace("\\", "\\\\")
    cleaned = cleaned.replace("<<quote>>", "\\\"")
    data = json.loads(cleaned)
    when = time.strptime(data["feedback_date"], "%d/%m/%Y %H:%M")
    # Both the comment and the attached file are optional.
    return MMSFeedback(data["sender_name"], when,
                       data.get("comment"), data.get("feedbackFileURL"))
def unescape(s):
    """Decode HTML character entities in *s* (e.g. "&amp;amp;" -> "&amp;").

    Fix: the original used the ``htmllib`` parser trick, but ``htmllib``
    was removed in Python 3; ``html.unescape`` performs the entity
    decoding directly.  NOTE(review): unlike the htmllib approach this
    does not strip markup tags -- confirm no caller relies on that (the
    only call site in this file is commented out).
    """
    import html
    return html.unescape(s)
| {
"repo_name": "SimonJF/mmslib",
"path": "src/mmslib.py",
"copies": "1",
"size": "17093",
"license": "bsd-2-clause",
"hash": -5808651350153146000,
"line_mean": 36.3209606987,
"line_max": 88,
"alpha_frac": 0.5962674779,
"autogenerated": false,
"ratio": 3.6539119281744337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9712537670863862,
"avg_score": 0.007528347042114269,
"num_lines": 458
} |
# API location info: all queries are issued against API_SERVER + API_BASE.
API_SERVER = 'http://services.phila.gov/'
API_BASE = 'PhillyApi/Data/v0.7/Service.svc/'

# Default parameters applied to queries
# The API wants the parameters to start with
# a '$'. This library accepts them without the
# '$' (to make things simpler), then adds the
# '$' when creating the query string.
DEFAULT_PARAMS = {
    'format': 'json'  # Changing this to XML will break things
}

# Supported URL parameters that need to have a '$' prepended to them
QUERY_PARAMS = [
    'expand',  # Setting expand to anything but locations breaks the API
    'filter',
    'format',
    'inlinecount',  # Can also use count=True, which is a little more intuitive
    'orderby',
    # 'select',  # Disabled for now, unclear if it works
    'skip',
    'top',
]

# Document types supported by the API (valid endpoint names).
DOC_TYPES = [
    'appealhearings',
    'buildingboardappeals',
    'cases',
    'hearingdates',
    'licenses',
    'licensedcontractors',
    'lireviewboardappeals',
    'locations',
    'permits',
    'violationdetails',
    'zoningboardappeals',
]

# Document types that don't want a single-quoted doc_id.
NUMBER_DOC_TYPE = [
    'locations',
    'buildingboardappeals',
    'appealhearings',
    'lireviewboardappeals',
    'violationdetails',
    'zoningboardappeals'
]
| {
"repo_name": "AxisPhilly/py-li",
"path": "li/settings.py",
"copies": "1",
"size": "1288",
"license": "mit",
"hash": 8608375845709719000,
"line_mean": 25.2857142857,
"line_max": 79,
"alpha_frac": 0.6708074534,
"autogenerated": false,
"ratio": 3.5095367847411443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4680344238141144,
"avg_score": null,
"num_lines": null
} |
"""API management class and base class for the different end points."""
import logging
from pprint import pformat
from typing import (
Any,
Callable,
ItemsView,
Iterator,
KeysView,
List,
Optional,
ValuesView,
)
import attr
# Module-level logger, named after this module.
LOGGER = logging.getLogger(__name__)

# Default "context" string sent with every API request body.
CONTEXT = "Axis library"
@attr.s
class Body:
    """Create API request body."""

    # Name of the API method to invoke.
    method: str = attr.ib()
    # API version string expected by the device endpoint.
    apiVersion: str = attr.ib()
    # Free-form caller identifier echoed back by the API.
    context: str = attr.ib(default=CONTEXT)
    # Method-specific parameters; defaults to an empty dict per instance.
    params: Any = attr.ib(factory=dict)
class APIItem:
    """Base class for all end points using the APIItems collection.

    Holds an ID, the raw API payload and a request callable, and lets
    interested parties observe raw-data updates.
    """

    def __init__(self, id: str, raw: dict, request: Callable) -> None:
        """Store identity, payload and request callable; no observers yet."""
        self._id = id
        self._raw = raw
        self._request = request
        self.observers: List[Callable] = []

    @property
    def id(self) -> str:
        """Read only ID."""
        return self._id

    @property
    def raw(self) -> dict:
        """Read only raw data."""
        return self._raw

    def update(self, raw: dict) -> None:
        """Replace the raw data and notify every registered observer."""
        self._raw = raw
        for notify in self.observers:
            notify()

    def register_callback(self, callback: Callable) -> None:
        """Register a callback to be invoked on each update."""
        self.observers.append(callback)

    def remove_callback(self, observer: Callable) -> None:
        """Remove *observer*; unknown observers are silently ignored."""
        try:
            self.observers.remove(observer)
        except ValueError:
            pass
class APIItems:
    """Base class for a map of API Items, keyed by item ID."""

    def __init__(self, raw, request, path, item_cls) -> None:
        """Store collaborators and ingest the initial raw data."""
        self._request = request
        self._path = path
        self._item_cls = item_cls
        self._items: dict = {}
        self.process_raw(raw)
        LOGGER.debug(pformat(raw))

    async def update(self) -> None:
        """Fetch fresh data from the API endpoint and merge it in."""
        fresh = await self._request("get", self._path)
        self.process_raw(fresh)

    @staticmethod
    def pre_process_raw(raw: dict) -> dict:
        """Hook for subclasses to reshape raw data; identity by default."""
        return raw

    def process_raw(self, raw: Any) -> set:
        """Merge *raw* into the map; return the set of newly-seen IDs."""
        added = set()
        for item_id, item_raw in self.pre_process_raw(raw).items():
            known = self._items.get(item_id)
            if known is None:
                self._items[item_id] = self._item_cls(
                    item_id, item_raw, self._request)
                added.add(item_id)
            else:
                known.update(item_raw)
        return added

    def items(self) -> ItemsView[str, APIItem]:
        """Return (id, item) pairs."""
        return self._items.items()

    def keys(self) -> KeysView[str]:
        """Return the item IDs."""
        return self._items.keys()

    def values(self) -> ValuesView[APIItem]:
        """Return the stored items."""
        return self._items.values()

    def get(self, obj_id: str, default: Optional[Any] = None):
        """Return the item for *obj_id*, or *default* when absent."""
        try:
            return self[obj_id]
        except KeyError:
            return default

    def __getitem__(self, obj_id: str) -> APIItem:
        """Return the item for *obj_id*; raises KeyError when absent."""
        return self._items[obj_id]

    def __iter__(self) -> Iterator[str]:
        """Iterate over the item IDs."""
        return iter(self._items)

    def __contains__(self, obj_id: str) -> bool:
        """True when *obj_id* is a known item ID."""
        return obj_id in self._items

    def __len__(self) -> int:
        """Number of stored items."""
        return len(self._items)

    def __bool__(self) -> bool:
        """Always True.

        Defined explicitly because ``__len__`` would otherwise make an
        empty collection falsy.
        """
        return True
| {
"repo_name": "Kane610/axis",
"path": "axis/api.py",
"copies": "1",
"size": "3831",
"license": "mit",
"hash": 319395384258110800,
"line_mean": 25.0612244898,
"line_max": 77,
"alpha_frac": 0.5632993996,
"autogenerated": false,
"ratio": 3.994786235662148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 147
} |
""" API manager implementation.
Responsible for routing and resource registration.
.. code-block:: python
# resources.py
from jsonapi.api import API
from jsonapi.resource import Resource
api = API()
@api.register
class AuthorResource(Resource):
class Meta:
model = 'testapp.author'
# urls.py
urlpatterns = patterns(
'',
url(r'^api', include(api.urls))
)
"""
import json
import logging
import time
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from .exceptions import JSONAPIError
from .serializers import DatetimeDecimalEncoder
from .signals import signal_request, signal_response
logger = logging.getLogger(__name__)
class API(object):
    """ API handler.

    Holds the registered resources, builds the Django URL configuration
    for them, and dispatches incoming requests to the matching handler.
    """

    CONTENT_TYPE = "application/vnd.api+json"

    def __init__(self):
        self._resources = []
        self.base_url = None  # base server url
        self.api_url = None  # api root url

    @property
    def resource_map(self):
        """ Resource map of api.

        .. versionadded:: 0.4.1

        :return: resource name to resource mapping.
        :rtype: dict
        """
        return {r.Meta.name: r for r in self._resources}

    @property
    def model_resource_map(self):
        """ Map of model -> resource, for model-backed resources only."""
        return {
            resource.Meta.model: resource
            for resource in self.resource_map.values()
            if hasattr(resource.Meta, 'model')
        }

    def register(self, resource=None, **kwargs):
        """ Register resource for current API.

        May be called directly or used as a decorator factory
        (``@api.register`` / ``@api.register(name='x')``).

        :param resource: Resource to be registered
        :type resource: jsonapi.resource.Resource or None
        :return: resource
        :rtype: jsonapi.resource.Resource

        .. versionadded:: 0.4.1
            :param kwargs: Extra meta parameters
        """
        if resource is None:
            # Keyword-only call: return a decorator closing over kwargs.
            def wrapper(resource):
                return self.register(resource, **kwargs)
            return wrapper

        for key, value in kwargs.items():
            setattr(resource.Meta, key, value)

        # Reject duplicate names and singular/plural name collisions.
        if resource.Meta.name in self.resource_map:
            raise ValueError('Resource {} already registered'.format(
                resource.Meta.name))

        if resource.Meta.name_plural in self.resource_map:
            raise ValueError(
                'Resource plural name {} conflicts with registered resource'.
                format(resource.Meta.name))

        resource_plural_names = {
            r.Meta.name_plural for r in self.resource_map.values()
        }
        if resource.Meta.name in resource_plural_names:
            raise ValueError(
                'Resource name {} conflicts with other resource plural name'.
                format(resource.Meta.name)
            )

        resource.Meta.api = self
        self._resources.append(resource)
        return resource

    @property
    def urls(self):
        """ Get all of the api endpoints.

        NOTE: only for django as of now.
        NOTE: urlpatterns are deprecated since Django1.8

        :return list: urls
        """
        from django.conf.urls import url
        urls = [
            url(r'^$', self.documentation),
            url(r'^map$', self.map_view),
        ]
        # Two patterns per resource: collection and comma-separated ids.
        for resource_name in self.resource_map:
            urls.extend([
                url(r'(?P<resource_name>{})$'.format(
                    resource_name), self.handler_view),
                url(r'(?P<resource_name>{})/(?P<ids>[\w\-\,]+)$'.format(
                    resource_name), self.handler_view),
            ])
        return urls

    def update_urls(self, request, resource_name=None, ids=None):
        """ Update url configuration.

        Rebuilds ``self.base_url`` and ``self.api_url`` from the incoming
        request, stripping the resource-name and ids path segments when
        present so api_url always points at the API root.

        :param request:
        :param resource_name:
        :type resource_name: str or None
        :param ids:
        :rtype: None
        """
        http_host = request.META.get('HTTP_HOST', None)
        if http_host is None:
            # Fall back to SERVER_NAME, appending non-default ports.
            http_host = request.META['SERVER_NAME']
            if request.META['SERVER_PORT'] not in ('80', '443'):
                http_host = "{}:{}".format(
                    http_host, request.META['SERVER_PORT'])

        self.base_url = "{}://{}".format(
            request.META['wsgi.url_scheme'],
            http_host
        )
        self.api_url = "{}{}".format(self.base_url, request.path)
        self.api_url = self.api_url.rstrip("/")
        # Walk back up the path: .../api/<resource>/<ids> -> .../api
        if ids is not None:
            self.api_url = self.api_url.rsplit("/", 1)[0]
        if resource_name is not None:
            self.api_url = self.api_url.rsplit("/", 1)[0]

    def map_view(self, request):
        """ Show information about available resources.

        Resources requiring authentication are hidden from users who do
        not authenticate successfully.

        .. versionadded:: 0.5.7
            Content-Type check

        :return django.http.HttpResponse
        """
        self.update_urls(request)
        resource_info = {
            "resources": [{
                "id": index + 1,
                "href": "{}/{}".format(self.api_url, resource_name),
            } for index, (resource_name, resource) in enumerate(
                sorted(self.resource_map.items()))
                if not resource.Meta.authenticators or
                resource.authenticate(request) is not None
            ]
        }
        response = json.dumps(resource_info)
        return HttpResponse(response, content_type="application/vnd.api+json")

    def documentation(self, request):
        """ Resource documentation.

        .. versionadded:: 0.7.2
            Content-Type check

        :return django.http.HttpResponse
        """
        self.update_urls(request)
        context = {
            "resources": sorted(self.resource_map.items())
        }
        return render(request, "jsonapi/index.html", context)

    def handler_view_get(self, resource, **kwargs):
        """ Serialize resource.get() into a 200 response."""
        items = json.dumps(
            resource.get(**kwargs),
            cls=resource.Meta.encoder
        )
        return HttpResponse(items, content_type=self.CONTENT_TYPE)

    def handler_view_post(self, resource, **kwargs):
        """ Create objects: 201 with a Location header, or 400 on error."""
        data = resource.post(**kwargs)
        if "errors" in data:
            response = HttpResponse(
                json.dumps(data, cls=DatetimeDecimalEncoder),
                content_type=self.CONTENT_TYPE, status=400)
            return response
        response = HttpResponse(
            json.dumps(data, cls=DatetimeDecimalEncoder),
            content_type=self.CONTENT_TYPE, status=201)
        # Location lists the newly created ids, comma separated.
        items = data["data"]
        items = items if isinstance(items, list) else [items]
        response["Location"] = "{}/{}".format(
            resource.Meta.name,
            ",".join([str(x["id"]) for x in items])
        )
        return response

    def handler_view_put(self, resource, **kwargs):
        """ Update objects: requires ids; 200 on success, 400 on error."""
        if 'ids' not in kwargs:
            return HttpResponse("Request SHOULD have resource ids", status=400)
        data = resource.put(**kwargs)
        if "errors" in data:
            response = HttpResponse(
                json.dumps(data, cls=DatetimeDecimalEncoder),
                content_type=self.CONTENT_TYPE, status=400)
            return response
        response = HttpResponse(
            json.dumps(data, cls=DatetimeDecimalEncoder),
            content_type=self.CONTENT_TYPE, status=200)
        return response

    def handler_view_delete(self, resource, **kwargs):
        """ Delete objects: requires ids; returns 204."""
        if 'ids' not in kwargs:
            return HttpResponse("Request SHOULD have resource ids", status=400)
        response = resource.delete(**kwargs)
        return HttpResponse(
            response, content_type=self.CONTENT_TYPE, status=204)

    def handler_view(self, request, resource_name, ids=None):
        """ Handler for resources.

        Checks the allowed HTTP methods and authentication, dispatches to
        the per-method handlers, and emits request/response signals with
        timing information.

        .. versionadded:: 0.5.7
            Content-Type check

        :return django.http.HttpResponse
        """
        signal_request.send(sender=self, request=request)
        time_start = time.time()
        self.update_urls(request, resource_name=resource_name, ids=ids)
        resource = self.resource_map[resource_name]
        allowed_http_methods = resource.Meta.allowed_methods
        if request.method not in allowed_http_methods:
            response = HttpResponseNotAllowed(
                permitted_methods=allowed_http_methods)
            signal_response.send(
                sender=self, request=request, response=response,
                duration=time.time() - time_start)
            return response
        # GET may bypass authentication when the resource opts in.
        if resource.Meta.authenticators and not (
                request.method == "GET" and
                resource.Meta.disable_get_authentication):
            user = resource.authenticate(request)
            if user is None or not user.is_authenticated():
                response = HttpResponse("Not Authenticated", status=401)
                signal_response.send(
                    sender=self, request=request, response=response,
                    duration=time.time() - time_start)
                return response
        kwargs = dict(request=request)
        if ids is not None:
            kwargs['ids'] = ids.split(",")
        try:
            if request.method == "GET":
                response = self.handler_view_get(resource, **kwargs)
            elif request.method == "POST":
                response = self.handler_view_post(resource, **kwargs)
            elif request.method == "PUT":
                response = self.handler_view_put(resource, **kwargs)
            elif request.method == "DELETE":
                response = self.handler_view_delete(resource, **kwargs)
            # NOTE(review): if allowed_methods ever contains another verb
            # (e.g. PATCH), ``response`` would be unbound here -- confirm.
        except JSONAPIError as e:
            response = HttpResponse(
                json.dumps({"errors": [e.data]}, cls=DatetimeDecimalEncoder),
                content_type=self.CONTENT_TYPE, status=e.status)
        signal_response.send(sender=self, request=request, response=response,
                             duration=time.time() - time_start)
        return response
| {
"repo_name": "pavlov99/jsonapi",
"path": "jsonapi/api.py",
"copies": "1",
"size": "9893",
"license": "mit",
"hash": -6584571974677727000,
"line_mean": 30.7083333333,
"line_max": 79,
"alpha_frac": 0.5708076418,
"autogenerated": false,
"ratio": 4.450292397660819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 312
} |
"""apimocker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from apimocker.mocker.views import ResolveMockedAddressView, CreateMockerView, ProcessMockFormView
# Order matters: the 6-character hash catch-all must remain last so it
# does not shadow the other routes.
urlpatterns = [
    # Landing page: form for creating a new mocked endpoint.
    url(r'^$', CreateMockerView.as_view(), name="home"),
    url(r'^admin/', admin.site.urls),
    # NOTE(review): this pattern has no leading '^', so it matches any URL
    # whose path contains "process_form/" -- confirm that is intended.
    url(r'process_form/$', ProcessMockFormView.as_view(), name='process_mock_form_view'),
    # Resolve a previously created mock by its 6-character hashed id.
    url(r'^(?P<hashed_id>\w{6})(.*)$', ResolveMockedAddressView.as_view(), name='mocked_api_view'),
]
| {
"repo_name": "paveu/api_mocker",
"path": "apimocker/urls.py",
"copies": "1",
"size": "1112",
"license": "mit",
"hash": 4009398518024500700,
"line_mean": 43.48,
"line_max": 99,
"alpha_frac": 0.7059352518,
"autogenerated": false,
"ratio": 3.411042944785276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4616978196585276,
"avg_score": null,
"num_lines": null
} |
# api/models/user.py
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from sqlalchemy import or_, and_, desc, asc
from datetime import datetime
from ..core.setup import db, config
from .tag import Tag
from .mark import Mark
from .apikey import ApiKey
from .setting import Setting
import bcrypt
class User(db.Model):
    """Account that owns marks, API keys and settings."""

    __tablename__ = 'User'

    id = db.Column(db.Integer, primary_key=True)
    # Status values: see ``smap`` below (1 = active, 2 = inactive).
    status = db.Column(db.Integer, default=1)
    email = db.Column(db.Unicode(255), unique=True, nullable=False)
    username = db.Column(db.Unicode(128), unique=True)
    # bcrypt hash, never the plain password.
    password = db.Column(db.Unicode(255), nullable=False)
    # Pagination size used by the list helpers below.
    per_page = db.Column(db.SmallInteger, default=20)
    sort_type = db.Column(db.Unicode(255), default=u'clicks')
    # Bug fix: the default was ``datetime.utcnow()`` -- called once at
    # import time, so every row got the same timestamp. Passing the
    # callable makes SQLAlchemy evaluate it per insert.
    created = db.Column(db.DateTime, default=datetime.utcnow)
    last_logged_in = db.Column(db.DateTime)
    marks = db.relationship('Mark', backref='owner', lazy='dynamic')
    apikeys = db.relationship('ApiKey', backref='owner', lazy='joined')
    settings = db.relationship('Setting', backref='owner', lazy='joined')
    smap = {'active': 1, 'inactive': 2}
    def __init__(self, email, password):
        """Create a user, hashing *password* with a fresh bcrypt salt."""
        self.email = email
        # NOTE(review): bcrypt.hashpw expects bytes on Python 3; this code
        # assumes Python 2 str inputs -- confirm the runtime.
        self.password = bcrypt.hashpw(password, bcrypt.gensalt())
def update(self, args):
for key, value in args.iteritems():
if value:
setattr(self, key, value)
db.session.add(self)
db.session.commit()
return self
    def delete(self):
        """Soft-delete: mark the user inactive, delete all API keys, commit.

        :return: this user, for chaining.
        """
        self.status = self.smap['inactive']
        for apikey in self.my_apikeys():
            apikey.delete()
        db.session.add(self)
        db.session.commit()
        return self

    def save(self):
        """Persist this user unless the email is already taken.

        :return: this user on success, or False when an active user with
            the same email already exists (note the mixed return types).
        """
        if not User.by_email(self.email):
            db.session.add(self)
            db.session.commit()
            return self
        return False
"""
Authentication
"""
@classmethod
def by_email(self, email):
return self.query.filter(and_(User.email == email,
User.status == self.smap['active']))\
.first()
    @staticmethod
    def verify_api_key(token):
        """Resolve a signed API token to its active owner, or None.

        The token is an itsdangerous-signed payload carrying the ApiKey
        UUID. Expired or tampered tokens yield None; a valid, unexpired
        key is renewed as a side effect.
        """
        s = Serializer(config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None
        except BadSignature:
            return None
        t = ApiKey.query.filter(ApiKey.value == data['uuid']).first()
        if t and t.expires > datetime.utcnow():
            # renew key on every successful use
            t.renew()
            t.update()
            if (t.owner.status == User.smap['active']):
                return t.owner
        return None
    def verify_password(self, password):
        """Check *password* against the stored bcrypt hash."""
        # hashpw(candidate, stored_hash) re-uses the stored salt, so equal
        # output means the password matches.
        return bcrypt.hashpw(password, self.password) == self.password

    def is_authenticated(self):
        """Flask-Login hook: stored users are always authenticated."""
        return True

    def is_active(self):
        """Flask-Login hook: True unless the user was soft-deleted."""
        return self.status == self.smap['active']
def is_anonymous(self):
return True
    def get_id(self):
        """Flask-Login hook: return the user id as a text string."""
        # NOTE(review): ``unicode`` is Python 2 only; use str() on Python 3.
        return unicode(self.id)
    """
    Marks
    """
    def create_mark(self, type, title, url, tags):
        """Create a Mark owned by this user, optionally tag it, and commit."""
        m = Mark(self.id, type, title, url)
        if tags:
            m.update_tags(tags)
        db.session.add(m)
        db.session.commit()
        return m

    def my_marks(self):
        """Base query: all active marks owned by this user."""
        return Mark.query.filter(and_(Mark.owner_id == self.id,
                                      Mark.status == self.smap['active']))

    def get_mark_by_id(self, id):
        """Return one of this user's active marks by id, or None.

        Side effect: bumps the mark's click counter on a successful lookup.
        """
        # NOTE(review): the status filter repeats the one already applied
        # by my_marks() -- redundant but harmless.
        mark = self.my_marks().filter(and_(Mark.id == id,
                                           Mark.status == self.smap['active']))\
                              .first()
        if mark:
            mark.increment_clicks()
            return mark
        return None

    def q_marks_by_url(self, string):
        """Return this user's mark with exactly the given URL, or None."""
        return self.my_marks().filter(Mark.url == string).first()
    def marks(self, page, q=False, type=False, tag=False, sort=False):
        """Return a paginated, filtered, sorted query of this user's marks.

        :param page: 1-based page number.
        :param q: optional substring matched against title or url.
        :param type: optional type filter (must be in Mark.valid_types).
        :param tag: optional tag title filter.
        :param sort: one of clicks / dateasc / datedesc / last_clicked.
        :return: Flask-SQLAlchemy Pagination object.
        """
        base = self.my_marks()
        # NOTE(review): the chosen sort is stored on the instance, so it
        # persists across calls; if no valid sort was ever supplied and
        # no class-level sort_type default exists outside this view,
        # reading self.sort_type below raises AttributeError.
        if sort and sort in ['clicks', 'dateasc', 'datedesc', 'last_clicked']:
            self.sort_type = sort
        if type and type in Mark.valid_types:
            base = base.filter(Mark.type == type)
        if q:
            q = "%"+q+"%"
            base = base.filter(or_(Mark.title.like(q),
                                   Mark.url.like(q)))
        if tag:
            base = base.filter(Mark.tags.any(title=tag))
        # NOTE(review): this branch rebuilds the query from my_marks(),
        # silently discarding any q/type/tag filters applied above.
        if self.sort_type == u'last_clicked':
            base = self.my_marks().filter(Mark.clicks > 0)\
                                  .order_by(desc(Mark.last_clicked))
        if self.sort_type == u'clicks':
            base = base.order_by(desc(Mark.clicks))\
                       .order_by(desc(Mark.created))
        if self.sort_type == u'dateasc':
            base = base.order_by(asc(Mark.created))
        if self.sort_type == u'datedesc':
            base = base.order_by(desc(Mark.created))
        return base.paginate(page, self.per_page, False)
"""
Tokens / ApiKeys
"""
    def create_apikey(self, title):
        """Create and persist a new API key owned by this user.

        :param title: human-readable label for the key.
        :return: the committed ApiKey.
        """
        ak = ApiKey(self.id, title)
        db.session.add(ak)
        db.session.commit()
        return ak
    def my_apikeys(self):
        """Base query for all of this user's API keys (any status)."""
        return ApiKey.query.filter(ApiKey.owner_id == self.id)

    def tokens(self, page):
        """Paginate this user's API keys.

        :param page: 1-based page number.
        """
        return self.my_apikeys().paginate(page, self.per_page, False)

    def get_token_by_key(self, key):
        """Return this user's ApiKey with the given key value, or None."""
        return self.my_apikeys().filter(ApiKey.key == key).first()
"""
Settings
"""
    def create_setting(self, name, client, json):
        """Create and persist a named settings blob for this user.

        :param name: setting name (used for lookup).
        :param client: optional client identifier.
        :param json: serialized settings payload.
        :return: the committed Setting.
        """
        setting = Setting(self.id, name, json)
        if client:
            setting.client = client
        db.session.add(setting)
        db.session.commit()
        return setting

    def my_settings(self):
        """Base query for this user's settings."""
        return Setting.query.filter(Setting.owner_id == self.id)

    def settings(self, page):
        """Paginate this user's settings.

        :param page: 1-based page number.
        """
        return self.my_settings().paginate(page, self.per_page, False)

    def get_setting_by_name(self, name):
        """Return this user's setting with the given name, or None."""
        return self.my_settings().filter(Setting.name == name).first()
"""
Tags
"""
    def my_tags(self):
        """Base query for tags used by any of this user's marks.

        NOTE(review): `Tag.Mark` looks unusual for a relationship name
        (relationships are normally lowercase, e.g. Tag.marks) — verify
        against the Tag model definition.
        """
        return Tag.query.filter(Tag.Mark.any(owner_id=self.id))

    def all_tags(self, page):
        """Paginate this user's tags.

        :param page: 1-based page number.
        """
        return self.my_tags().paginate(page, self.per_page, False)
"""
Generic
"""
def json_pager(self, obj):
return {'page': obj.page,
'pages': obj.pages,
'next_num': obj.next_num if obj.has_next else -1,
'prev_num': obj.prev_num if obj.has_prev else -1,
'total': obj.total}
    def __repr__(self):
        """Debug representation.

        NOTE(review): __init__ only sets email and password; unless a
        `username` column is declared above this view, this raises
        AttributeError — self.email may be the intended field.
        """
        return '<User %r>' % (self.username)
| {
"repo_name": "plastboks/Flaskmarks-API",
"path": "api/models/user.py",
"copies": "1",
"size": "6614",
"license": "mit",
"hash": -760498909283658500,
"line_mean": 29.9065420561,
"line_max": 80,
"alpha_frac": 0.5621409132,
"autogenerated": false,
"ratio": 3.766514806378132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9828367269133919,
"avg_score": 0.000057690088842736815,
"num_lines": 214
} |
"""api module.
This module contains the entities needed to work with Telegram Bot API.
"""
import re
import sys
import json
from io import BytesIO
from functools import wraps
import requests
from twisted.internet import reactor, threads, task, _sslverify
from twisted.internet.error import ConnectionDone
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.protocol import Protocol
from twisted.web.http_headers import Headers
from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool
from .url import get_url
from .content import (
DictItem, BadRequest, Update, User, Message, UserProfilePhotos, File, Chat,
ChatMember, GameHighScore, _Query, StickerSet
)
# Highest update id processed so far by polling; 0 means "never polled".
_update_id = 0
# NOTE(review): on Windows this disables twisted's platform certificate
# trust entirely — presumably to work around platformTrust problems
# there; confirm this is still required and acceptable.
if sys.platform == 'win32':
    _sslverify.platformTrust = lambda: None
def _api_request(return_type=None):
    """Decorator for API methods.

    Wraps an (empty-bodied) TelegramBot method, issues the HTTP request
    named after it and converts the JSON payload into *return_type*.

    :param return_type: Class to build from the response 'result', or
        None when the API returns a plain value (e.g. True).
    :return: Instance of return_type, list of them, BadRequest, or the
        raw result value.
    """
    def inner(func):
        @inlineCallbacks
        @wraps(func)
        def wrapper(bot_obj, **kwargs):
            # Telegram method names are the python names with the
            # underscores removed: get_updates -> getupdates.
            api_method = func.__name__.replace('_', '')
            response = yield bot_obj._api.request(api_method, **kwargs)
            if isinstance(response, BadRequest):
                obj = response
            else:
                response_dict = json.loads(response.decode())
                try:
                    obj_args = response_dict['result']
                except KeyError:
                    # No 'result' key: the API reported an error payload.
                    obj = BadRequest(response_dict['error_code'],
                                     response_dict['description'],
                                     response_dict.get('parameters', None))
                else:
                    # try body narrowed to the single failing lookup so a
                    # KeyError raised inside return_type construction is
                    # no longer misreported as an API error.
                    if isinstance(obj_args, dict):
                        obj = return_type(**obj_args)
                    elif isinstance(obj_args, list):
                        obj = return_type.from_list(obj_args)
                    else:
                        obj = obj_args
            return obj
        return wrapper
    return inner
@inlineCallbacks
def _get_updates(bot, handler=None, **kwargs):
    """Calls the method get_updates. Uses in polling mode only.

    Fetches updates newer than the module-level _update_id, then feeds
    each update's content to *handler*.

    :param bot: TelegramBot instance.
    :param handler: Function; defaults to MessageHandler.handler.
    :param kwargs: kwargs for get_updates method, without offset.
    :return: Deferred.
    """
    global _update_id
    handler = handler or MessageHandler.handler
    updates = yield bot.get_updates(offset=_update_id + 1, **kwargs)
    if isinstance(updates, BadRequest):
        raise APIRequestError(updates.__dict__)
    if updates:
        if _update_id == 0:
            # First poll: record the newest id, then recurse to drain
            # anything that arrived after this batch.
            _update_id = updates[-1].id
            yield _get_updates(bot, handler, **kwargs)
        # NOTE(review): after the first-poll recursion above has already
        # advanced _update_id past later batches, this line resets it
        # back to this batch's last id — the next poll may then re-fetch
        # and re-handle those later updates. Confirm intended semantics.
        _update_id = updates[-1].id
        obj_list = [update.content for update in updates]
        for obj in obj_list:
            yield handler(obj)
def polling(bot, interval=10, handler=None, **kwargs):
    """Starts polling.

    :param bot: TelegramBot instance.
    :param interval: Polling interval in seconds.
    :param handler: Function. If uses, then MessageHandler decorator
        will not be works. It's need if you want handling all messages in
        one function.
    :param kwargs: kwargs for get_updates method, without offset.
    :return: the started twisted LoopingCall, so callers can stop() it.
        (Previously the loop object was discarded and the poll loop
        could never be cancelled; returning it is backward compatible.)
    """
    loop = task.LoopingCall(_get_updates, bot, handler, **kwargs)
    loop.start(interval)
    return loop
class APIRequestError(Exception):
    """Raised when the Telegram API returns a BadRequest while polling."""
    pass
class MessageHandler(object):
    """This class is a decorator for functions-handlers defined by the user.

    The purpose of this entity is to store the handlers associated with a
    content type and extract their.
    """
    # Class-level registry: decorated function -> list of content type /
    # command names it accepts.
    _handlers = {}

    def __init__(self, content=None, command=None):
        """Initial instance.

        :param content: List of content names.
            Available content names: text, photo, audio, video, video_note,
            document, sticker, voice, contact, location, venue, game,
            invoice, successful_payment, inline_query, callback_query,
            shipping_query, pre_checkout_query, chosen_inline_result.
        :param command: List of command names.
        """
        self._content = content
        if command:
            # Normalize into a NEW list so the caller's list is not
            # mutated in place (the old code rewrote it item by item and
            # crashed with IndexError on an empty string).
            command = ['/' + cmd if not cmd.startswith('/') else cmd
                       for cmd in command]
        self._command = command

    def __call__(self, func):
        """Register *func* and return it unchanged.

        The previous implementation returned None, which replaced the
        decorated function with None in the caller's module.
        """
        if all((self._content, self._command)):
            content_list = self._content + self._command
        else:
            content_list = self._content or self._command
        MessageHandler._handlers[func] = content_list
        return func

    @staticmethod
    def handler(message):
        """Calls handler.

        :param message: input object.
        :return: Deferred.
        """
        deferred = threads.deferToThread(MessageHandler._get_handler, message)

        @inlineCallbacks
        def run_handler(handler):
            if handler:
                yield handler(message)
        deferred.addCallback(run_handler)
        return deferred

    @staticmethod
    def _get_handler(message):
        """Gets handler by content.

        A command handler takes precedence over the generic text handler;
        an unregistered command is deliberately ignored rather than
        falling back to the text handler.

        :param message: input object.
        :return: func or None.
        """
        content = None
        command = None
        if isinstance(message, _Query):
            content = message.get_str_type()
        if not content:
            content = message.content_type
        if content == 'text':
            command = message.text if message.text[0] == '/' else None
        content_handler, command_handler = map(
            MessageHandler._get_content_handler, (content, command)
        )
        if content_handler is command_handler:
            handler = command_handler
        elif command and not command_handler:
            handler = None
        else:
            handler = command_handler or content_handler
        return handler

    @staticmethod
    def _get_content_handler(content):
        """Searching for the handler.

        :param content: content type name.
        :return: func or None.
        """
        handler = None
        for func, content_list in MessageHandler._handlers.items():
            if content in content_list:
                handler = func
                break
        return handler
class _HTTPClientProtocol(Protocol):
    """Class for gets payload from response.

    Accumulates the response body and fires *deferred* with the full
    bytes once the connection closes.
    """
    def __init__(self, deferred):
        # Deferred to fire with the complete body.
        self.deferred = deferred
        self.data = b''

    def dataReceived(self, data):
        # Twisted delivers the body in chunks; collect them all.
        self.data += data

    def connectionLost(self, reason=ConnectionDone):
        # Connection closed (normally or not): hand over whatever arrived.
        self.deferred.callback(self.data)
class _APIRequest(object):
    """Class for requesting the API.

    Uploads (multipart file posts) go through blocking `requests` in a
    thread; everything else goes through the non-blocking twisted Agent.
    """
    # URL template with {token} and {method} placeholders.
    _url = get_url()
    _HEADERS = Headers({'Content-Type': ['application/json']})
    # API methods that may carry a file upload (multipart form data).
    _UPLOAD_METHODS = ('sendphoto', 'sendaudio', 'sendvideo',
                       'senddocument', 'sendsticker', 'sendvoice',
                       'sendvideonote', 'setchatphoto', 'uploadstickerfile',
                       'createnewstickerset', 'addstickertoset')
    # Method-name prefixes that require HTTP POST; all others use GET.
    _POST_METHODS = ('send', 'forward', 'kick', 'leave', 'pin', 'unpin', 'add',
                     'unban', 'answer', 'edit', 'delete', 'set', 'upload',
                     'create')

    def __init__(self, token):
        """Initial instance.

        :param token: Bot token.
        """
        self._agent = Agent(reactor, pool=HTTPConnectionPool(reactor))
        # NOTE(review): this rewrites the *class* attribute in place, so
        # constructing a second _APIRequest with a different token keeps
        # the first token — the template is consumed on first use.
        _APIRequest._url = _APIRequest._url.format(token=token,
                                                   method='{method}')

    def request(self, api_method, **kwargs):
        """Makes request to the API.

        :param api_method: API method name.
        :param kwargs: method arguments.
        :return: Deferred.
        """
        # Build the request description off the reactor thread.
        deferred = threads.deferToThread(self._get_request_args, api_method,
                                         **kwargs)

        def _request(req_data):
            if not isinstance(req_data, BadRequest):
                if 'files' in req_data:
                    # File upload: blocking `requests` call in a thread.
                    res_finished = threads.deferToThread(self._upload_request,
                                                         **req_data)
                else:
                    # Plain JSON request through the twisted Agent.
                    res_deferred = self._agent.request(**req_data)

                    def response_handler(response, dfd):
                        response.deliverBody(_HTTPClientProtocol(dfd))
                    res_finished = Deferred()
                    res_deferred.addCallback(response_handler, res_finished)
            else:
                # Propagate the BadRequest built by _get_request_args.
                res_finished = req_data
            return res_finished
        deferred.addCallback(_request)
        return deferred

    @staticmethod
    def _upload_request(**kwargs):
        """Uses for upload files.

        :param kwargs: request args.
        :return bytes.
        """
        return requests.post(**kwargs).content

    @staticmethod
    def _get_url(api_method):
        # Substitute the remaining {method} placeholder.
        return _APIRequest._url.format(method=api_method)

    @staticmethod
    def _get_request_args(api_method, **kwargs):
        """Gets arguments for request.

        :param api_method: API method name.
        :param kwargs: method arguments.
        :return: dictionary with the arguments or BadRequest.
        """
        req_data = {}
        if api_method in _APIRequest._UPLOAD_METHODS:
            content = api_method.replace('send', '')
            try:
                upload_source = kwargs[content]
            except KeyError:
                req_data = BadRequest(400, '{} is empty'.format(content))
            else:
                # A trailing file extension is taken to mean a local path.
                # NOTE(review): re.search requires a string here; a
                # file-like upload_source would raise TypeError.
                is_file_path = bool(re.search(r'\.[a-zA-Z0-9]+$',
                                              upload_source))
                if is_file_path and not upload_source.startswith('http'):
                    kwargs.pop(content)
                    # NOTE(review): this handle is passed to `requests`
                    # and is never explicitly closed here.
                    file = {content: open(upload_source, 'rb')}
                    req_data = {
                        'url': _APIRequest._get_url(api_method),
                        'data': kwargs,
                        'files': file,
                    }
        if not req_data:
            # Non-upload path: choose the verb by method-name prefix.
            if list(
                filter(lambda method: api_method.startswith(method),
                       _APIRequest._POST_METHODS)
            ):
                req_method = b'POST'
            else:
                req_method = b'GET'
            url = _APIRequest._get_url(api_method).encode()
            body = None
            if kwargs:
                # Serialize project objects (DictItem) into plain dicts
                # before JSON-encoding the request body.
                for arg, value in kwargs.items():
                    if isinstance(value, DictItem):
                        kwargs[arg] = value.to_dict()
                    elif isinstance(value, (list, tuple)):
                        obj_list = [obj.to_dict() for obj in value
                                    if isinstance(obj, DictItem)]
                        kwargs[arg] = obj_list or kwargs[arg]
                body = FileBodyProducer(
                    BytesIO(json.dumps(kwargs).encode())
                )
            req_data = {
                'method': req_method,
                'uri': url,
                'headers': _APIRequest._HEADERS,
                'bodyProducer': body
            }
        return req_data
class TelegramBot(object):
"""This class represents Telegram bot."""
    def __init__(self, token):
        """Initial instance.

        :param token: Bot token.
        """
        self.token = token
        # NOTE(review): _APIRequest stores the formatted URL on the
        # class, so only the first TelegramBot's token takes effect.
        self._api = _APIRequest(self.token)
def __str__(self):
return '{}(token:{})'.format(self.__class__.__name__, self.token)
    @_api_request(return_type=Update)
    def get_updates(self, offset=None, limit=None, timeout=None,
                    allowed_updates=None):
        """Use this method to receive incoming updates using long polling.
        :param offset: Identifier of the first update to be returned.
        :param limit: Limits the number of updates to be retrieved.
            Values between 1—100 are accepted. Defaults to 100.
        :param timeout: Timeout in seconds for long polling.
            Defaults to 0, i.e. usual short polling. Should be positive,
            short polling should be used for testing purposes only.
        :param allowed_updates: List the types of updates you want your bot
            to receive. For example, specify ['message',
            'edited_channel_post', 'callback_query'] to only receive updates
            of these types.
        :return: List of Update objects.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=User)
    def get_me(self):
        """Returns basic information about the bot.
        :return: User.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_message(self, *, chat_id, text, parse_mode=None,
                     disable_web_page_preview=None, disable_notification=None,
                     reply_to_message_id=None, reply_markup=None):
        """Use this method to send text messages.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param text: Text of the message to be sent.
        :param parse_mode: Send Markdown or HTML, if you want Telegram apps
            to show bold, italic, fixed-width text or inline URLs in your
            bot's message.
        :param disable_web_page_preview: Disables link previews for links in
            this message.
        :param disable_notification: Sends the message silently. iOS users
            will not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def forward_message(self, *, chat_id, from_chat_id, message_id,
                        disable_notification=None):
        """Use this method to forward messages of any kind.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param from_chat_id: Unique identifier for the chat where the original
            message was sent (or channel username in the format
            @channelusername).
        :param message_id: Message identifier in the chat specified in
            from_chat_id.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_photo(self, *, chat_id, photo, caption=None,
                   disable_notification=None, reply_to_message_id=None,
                   reply_markup=None):
        """Use this method to send photos.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param photo: Photo to send. Pass a file_id as String to send a photo
            that exists on the Telegram servers (recommended), pass an HTTP URL
            as a String for Telegram to get a photo from the Internet, or
            upload a new photo.
        :param caption: Photo caption (may also be used when resending photos
            by file_id), 0-200 characters.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_audio(self, *, chat_id, audio, caption=None, duration=None,
                   performer=None, title=None, disable_notification=None,
                   reply_to_message_id=None, reply_markup=None):
        """Use this method to send audio files, if you want Telegram clients
        to display them in the music player. Your audio must be in the .mp3
        format.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param audio: Audio file to send. Pass a file_id as String to send an
            audio file that exists on the Telegram servers (recommended), pass
            an HTTP URL as a String for Telegram to get an audio file from the
            Internet, or upload a new.
        :param caption: Audio caption, 0-200 characters.
        :param duration: Duration of the audio in seconds.
        :param performer: Performer.
        :param title: Track name.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass
    @_api_request(return_type=Message)
    def send_document(self, *, chat_id, document, caption=None,
                      disable_notification=None, reply_to_message_id=None,
                      reply_markup=None):
        """Use this method to send general files.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param document: File to send. Pass a file_id as String to send a file
            that exists on the Telegram servers (recommended), pass an HTTP URL
            as a String for Telegram to get a file from the Internet, or upload
            a new.
        :param caption: Document caption (may also be used when resending
            documents by file_id), 0-200 characters.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_sticker(self, *, chat_id, sticker, disable_notification=None,
                     reply_to_message_id=None, reply_markup=None):
        """Use this method to send .webp stickers.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param sticker: Sticker to send. Pass a file_id as String to send a
            file that exists on the Telegram servers (recommended), pass an
            HTTP URL as a String for Telegram to get a .webp file from the
            Internet, or upload a new.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_video(self, *, chat_id, video, duration=None, width=None,
                   height=None, caption=None, disable_notification=None,
                   reply_to_message_id=None, reply_markup=None):
        """Use this method to send video files, Telegram clients support mp4
        videos (other formats may be sent as Document).
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param video: Video to send. Pass a file_id as String to send a video
            that exists on the Telegram servers (recommended), pass an HTTP URL
            as a String for Telegram to get a video from the Internet, or
            upload a new.
        :param duration: Duration of sent video in seconds.
        :param width: Video width.
        :param height: Video height.
        :param caption: Video caption (may also be used when resending videos
            by file_id), 0-200 characters.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_video_note(self, *, chat_id, video_note, duration=None,
                        length=None, disable_notification=None,
                        reply_to_message_id=None, reply_markup=None):
        """As of v.4.0, Telegram clients support rounded square mp4 videos of
        up to 1 minute long. Use this method to send video messages. On
        success, the sent Message is returned.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param video_note: Video note to send. Pass a file_id as String to send
            a video note that exists on the Telegram servers (recommended) or
            upload a new video.
        :param duration: Duration of sent video in seconds.
        :param length: Video width and height.
        :param disable_notification: Sends the message silently. Users will
            receive a notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_voice(self, *, chat_id, voice, caption=None, duration=None,
                   disable_notification=None, reply_to_message_id=None,
                   reply_markup=None):
        """Use this method to send audio files, if you want Telegram clients to
        display the file as a playable voice message. For this to work, your
        audio must be in an .ogg file encoded with OPUS (other formats may be
        sent as Audio or Document).
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param voice: Audio file to send. Pass a file_id as String to send a
            file that exists on the Telegram servers (recommended), pass an
            HTTP URL as a String for Telegram to get a file from the Internet,
            or upload a new.
        :param caption: Voice message caption, 0-200 characters.
        :param duration: Duration of the voice message in seconds.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass
    @_api_request(return_type=Message)
    def send_location(self, *, chat_id, latitude, longitude, live_period=None,
                      disable_notification=None, reply_to_message_id=None,
                      reply_markup=None):
        """Use this method to send point on the map.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param latitude: Latitude of location.
        :param longitude: Longitude of location.
        :param live_period: Period in seconds for which the location will be
            updated (see Live Locations, should be between 60 and 86400.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_venue(self, *, chat_id, latitude, longitude, title, address,
                   foursquare_id=None, disable_notification=None,
                   reply_to_message_id=None, reply_markup=None):
        """Use this method to send information about a venue.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param latitude: Latitude of the venue.
        :param longitude: Longitude of the venue.
        :param title: Name of the venue.
        :param address: Address of the venue.
        :param foursquare_id: Foursquare identifier of the venue.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=Message)
    def send_contact(self, *, chat_id, phone_number, first_name,
                     last_name=None, disable_notification=None,
                     reply_to_message_id=None, reply_markup=None):
        """Use this method to send phone contacts.
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param phone_number: Contact's phone number.
        :param first_name: Contact's first name.
        :param last_name: Contact's last name.
        :param disable_notification: Sends the message silently. iOS users will
            not receive a notification, Android users will receive a
            notification with no sound.
        :param reply_to_message_id: If the message is a reply, ID of the
            original message.
        :param reply_markup: Additional interface options (
            InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardRemove
            or ForceReply).
        :return: Message.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request()
    def send_chat_action(self, *, chat_id, action):
        """Use this method when you need to tell the user that something is
        happening on the bot's side. The status is set for 5 seconds or less
        (when a message arrives from your bot, Telegram clients clear its
        typing status).
        :param chat_id: Unique identifier for the target chat or username of
            the target channel (in the format @channelusername).
        :param action: Type of action to broadcast. Choose one, depending on
            what the user is about to receive: typing for text messages,
            upload_photo for photos, record_video or upload_video for videos,
            record_audio or upload_audio for audio files, upload_document for
            general files, find_location for location data, record_video_note
            or upload_video_note for video notes.
        :return: True.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=UserProfilePhotos)
    def get_user_profile_photos(self, *, user_id, offset=None, limit=None):
        """Use this method to get a list of profile pictures for a user.
        :param user_id: Unique identifier of the target user.
        :param offset: Sequential number of the first photo to be returned. By
            default, all photos are returned.
        :param limit: Limits the number of photos to be retrieved. Values
            between 1—100 are accepted. Defaults to 100.
        :return: UserProfilePhotos.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request(return_type=File)
    def get_file(self, *, file_id):
        """Use this method to get basic info about a file and prepare it for
        downloading.
        :param file_id: File identifier to get info about.
        :return: File.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request()
    def kick_chat_member(self, *, chat_id, user_id, until_date=None):
        """Use this method to kick a user from a group or a supergroup. In the
        case of supergroups, the user will not be able to return to the group
        on their own using invite links, etc., unless unbanned first. The bot
        must be an administrator in the group for this to work.
        :param chat_id: Unique identifier for the target group or username of
            the target supergroup (in the format @supergroupusername).
        :param user_id: Unique identifier of the target user.
        :param until_date: Date when the user will be unbanned, unix time. If
            user is banned for more than 366 days or less than 30 seconds from
            the current time they are considered to be banned forever.
        :return: True.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request()
    def leave_chat(self, *, chat_id):
        """Use this method for your bot to leave a group, supergroup or
        channel.
        :param chat_id: Unique identifier for the target group or username of
            the target supergroup (in the format @supergroupusername).
        :return: True.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass

    @_api_request()
    def unban_chat_member(self, *, chat_id, user_id):
        """Use this method to unban a previously kicked user in a supergroup.
        The user will not return to the group automatically, but will be able
        to join via link, etc. The bot must be an administrator in the group
        for this to work.
        :param chat_id: Unique identifier for the target group or username of
            the target supergroup (in the format @supergroupusername).
        :param user_id: Unique identifier of the target user.
        :return: True.
        """
        # Body intentionally empty: _api_request performs the HTTP call.
        pass
@_api_request(return_type=Chat)
def get_chat(self, *, chat_id):
"""Use this method to get up to date information about the chat
(current name of the user for one-on-one conversations, current
username of a user, group or channel, etc.).
:param chat_id: Unique identifier for the target group or username of
the target supergroup (in the format @supergroupusername).
:return: Chat.
"""
pass
@_api_request(return_type=ChatMember)
def get_chat_administrators(self, *, chat_id):
"""Use this method to get a list of administrators in a chat.
:param chat_id: Unique identifier for the target chat or username of
the target supergroup or channel (in the format @channelusername).
:return: ChatMember.
"""
pass
@_api_request()
def get_chat_members_count(self, *, chat_id):
"""Use this method to get the number of members in a chat.
:param chat_id: Unique identifier for the target chat or username of
the target supergroup or channel (in the format @channelusername).
:return: int.
"""
pass
@_api_request(return_type=ChatMember)
def get_chat_member(self, *, chat_id, user_id):
"""Use this method to get information about a member of a chat.
:param chat_id: Unique identifier for the target chat or username of
the target supergroup or channel (in the format @channelusername).
:param user_id: Unique identifier of the target user.
:return: ChatMember.
"""
pass
@_api_request()
def answer_callback_query(self, *, callback_query_id, text=None, url=None,
show_alert=None, cache_time=None):
"""Use this method to send answers to callback queries sent from inline
keyboards. The answer will be displayed to the user as a notification
at the top of the chat screen or as an alert.
:param callback_query_id: Unique identifier for the query to be
answered.
:param text: Text of the notification. If not specified, nothing will
be shown to the user, 0-200 characters.
:param url: URL that will be opened by the user's client. If you have
created a Game and accepted the conditions via @Botfather, specify
the URL that opens your game – note that this will only work if the
query comes from a callback_game button.
:param show_alert: If true, an alert will be shown by the client
instead of a notification at the top of the chat screen. Defaults
to false.
:param cache_time: The maximum amount of time in seconds that the
result of the callback query may be cached client-side. Telegram
apps will support caching starting in version 3.14. Defaults to 0.
:return: True.
"""
pass
@_api_request(return_type=Message)
def edit_message_text(self, *, chat_id, text, message_id=None,
inline_message_id=None, parse_mode=None,
disable_web_page_preview=None, reply_markup=None):
"""Use this method to edit text and game messages sent by the bot or
via the bot (for inline bots).
:param chat_id:Required if inline_message_id is not specified. Unique
identifier for the target chat or username of the target channel
(in the format @channelusername).
:param text: New text of the message.
:param message_id: Required if inline_message_id is not specified.
Identifier of the sent message.
:param inline_message_id: Required if chat_id and message_id are not
specified. Identifier of the inline message.
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in your bot's
message.
:param disable_web_page_preview: Disables link previews for links in
this message.
:param reply_markup: InlineKeyboardMarkup.
:return: If edited message is sent by the bot, the edited Message is
returned, otherwise True is returned.
"""
pass
@_api_request(return_type=Message)
def edit_message_caption(self, *, chat_id=None, message_id=None,
inline_message_id=None, caption=None,
reply_markup=None):
"""Use this method to edit captions of messages sent by the bot or via
the bot (for inline bots).
:param chat_id: Required if inline_message_id is not specified. Unique
identifier for the target chat or username of the target channel
(in the format @channelusername).
:param message_id: Required if inline_message_id is not specified.
Identifier of the sent message.
:param inline_message_id: Required if chat_id and message_id are not
specified. Identifier of the inline message.
:param caption: New caption of the message.
:param reply_markup: InlineKeyboardMarkup.
:return: If edited message is sent by the bot, the edited Message is
returned, otherwise True is returned.
"""
pass
@_api_request(return_type=Message)
def edit_message_reply_markup(self, *, chat_id=None, message_id=None,
                              inline_message_id=None, reply_markup=None):
    """Use this method to edit only the reply markup of messages sent by
    the bot or via the bot (for inline bots).

    :param chat_id: Required if inline_message_id is not specified. Unique
        identifier for the target chat or username of the target channel
        (in the format @channelusername).
    :param message_id: Required if inline_message_id is not specified.
        Identifier of the sent message.
    :param inline_message_id: Required if chat_id and message_id are not
        specified. Identifier of the inline message.
    :param reply_markup: InlineKeyboardMarkup.
    :return: If edited message is sent by the bot, the edited Message is
        returned, otherwise True is returned.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=Message)
def edit_message_live_location(self, *, latitude, longitude, chat_id=None,
                               message_id=None, inline_message_id=None,
                               reply_markup=None):
    """Use this method to edit live location messages sent by the bot or
    via the bot (for inline bots). A location can be edited until its
    live_period expires or editing is explicitly disabled by a call to
    stop_message_live_location. On success, if the edited message was sent
    by the bot, the edited Message is returned, otherwise True is returned.

    :param latitude: Latitude of new location.
    :param longitude: Longitude of new location.
    :param chat_id: Required if inline_message_id is not specified. Unique
        identifier for the target chat or username of the target channel
        (in the format @channelusername).
    :param message_id: Required if inline_message_id is not specified.
        Identifier of the sent message.
    :param inline_message_id: Required if chat_id and message_id are not
        specified. Identifier of the inline message.
    :param reply_markup: A JSON-serialized object for a new inline
        keyboard.
    :return: Edited Message if sent by the bot, otherwise True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=Message)
def stop_message_live_location(self, *, chat_id=None, message_id=None,
                               inline_message_id=None, reply_markup=None):
    """Use this method to stop updating a live location message sent by the
    bot or via the bot (for inline bots) before live_period expires. On
    success, if the message was sent by the bot, the sent Message is
    returned, otherwise True is returned.

    :param chat_id: Required if inline_message_id is not specified. Unique
        identifier for the target chat or username of the target channel
        (in the format @channelusername).
    :param message_id: Required if inline_message_id is not specified.
        Identifier of the sent message.
    :param inline_message_id: Required if chat_id and message_id are not
        specified. Identifier of the inline message.
    :param reply_markup: A JSON-serialized object for a new inline
        keyboard.
    :return: Sent Message if sent by the bot, otherwise True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def delete_message(self, *, chat_id, message_id):
    """Use this method to delete a message, including service messages,
    with the following limitations:
    - A message can only be deleted if it was sent less than 48 hours
      ago.
    - Bots can delete outgoing messages in groups and supergroups.
    - Bots granted can_post_messages permissions can delete outgoing
      messages in channels.
    - If the bot is an administrator of a group, it can delete any
      message there.
    - If the bot has can_delete_messages permission in a supergroup or
      a channel, it can delete any message there.
    Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :param message_id: Identifier of the message to delete.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def answer_inline_query(self, *, inline_query_id, results,
                        cache_timer=None, is_personal=None,
                        next_offset=None, switch_pm_text=None,
                        switch_pm_parameter=None):
    """Use this method to send answers to an inline query.

    :param inline_query_id: Unique identifier for the answered query.
    :param results: List of results for the inline query.
    :param cache_timer: The maximum amount of time in seconds that the
        result of the inline query may be cached on the server. Defaults
        to 300.
    :param is_personal: Pass True, if results may be cached on the server
        side only for the user that sent the query. By default, results may
        be returned to any user who sends the same query.
    :param next_offset: Pass the offset that a client should send in the
        next query with the same text to receive more results. Pass an
        empty string if there are no more results or if you don't support
        pagination. Offset length can't exceed 64 bytes.
    :param switch_pm_text: If passed, clients will display a button with
        specified text that switches the user to a private chat with the
        bot and sends the bot a start message with the parameter
        switch_pm_parameter.
    :param switch_pm_parameter: Deep-linking parameter for the /start
        message sent to the bot when user presses the switch button.
        1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.
    :return: True.
    """
    # NOTE(review): Telegram's wire field is named cache_time; confirm the
    # _api_request decorator maps cache_timer to it correctly.
    pass
@_api_request(return_type=Message)
def send_game(self, *, chat_id, game_short_name, disable_notification=None,
              reply_to_message_id=None, reply_markup=None):
    """Use this method to send a game.

    :param chat_id: Unique identifier for the target chat.
    :param game_short_name: Short name of the game, serves as the unique
        identifier for the game. Set up your games via Botfather.
    :param disable_notification: Sends the message silently. iOS users will
        not receive a notification, Android users will receive a
        notification with no sound.
    :param reply_to_message_id: If the message is a reply, ID of the
        original message.
    :param reply_markup: InlineKeyboardMarkup.
    :return: Message.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=Message)
def set_game_score(self, *, user_id, score, force=None,
                   disable_edit_message=None, chat_id=None,
                   message_id=None, inline_message_id=None):
    """Use this method to set the score of the specified user in a game.

    :param user_id: User identifier.
    :param score: New score, must be non-negative.
    :param force: Pass True, if the high score is allowed to decrease.
        This can be useful when fixing mistakes or banning cheaters.
    :param disable_edit_message: Pass True, if the game message should not
        be automatically edited to include the current scoreboard.
    :param chat_id: Required if inline_message_id is not specified. Unique
        identifier for the target chat.
    :param message_id: Required if inline_message_id is not specified.
        Identifier of the sent message.
    :param inline_message_id: Required if chat_id and message_id are not
        specified. Identifier of the inline message.
    :return: if the message was sent by the bot, returns the edited
        Message, otherwise returns True. Returns an error, if the new score
        is not greater than the user's current score in the chat and force
        is False.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=GameHighScore)
def get_game_high_scores(self, *, user_id, chat_id=None, message_id=None,
                         inline_message_id=None):
    """Use this method to get data for high score tables.

    :param user_id: Target user id.
    :param chat_id: Required if inline_message_id is not specified. Unique
        identifier for the target chat.
    :param message_id: Required if inline_message_id is not specified.
        Identifier of the sent message.
    :param inline_message_id: Required if chat_id and message_id are not
        specified. Identifier of the inline message.
    :return: List of GameHighScore.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=Message)
def send_invoice(self, *, chat_id, title, payload, provider_token, prices,
                 start_parameter, currency, photo_url=None, need_name=None,
                 description=None, photo_size=None, photo_width=None,
                 photo_height=None, need_phone_number=None,
                 need_email=None, need_shipping_address=None,
                 is_flexible=None, disable_notification=None,
                 reply_to_message_id=None, reply_markup=None):
    """Use this method to send invoices.

    :param chat_id: Unique identifier for the target private chat.
    :param title: Product name, 1-32 characters.
    :param payload: Bot-defined invoice payload, 1-128 bytes. This will not
        be displayed to the user, use for your internal processes.
    :param provider_token: Payments provider token, obtained via Botfather.
    :param prices: List of LabeledPrice. Price breakdown, a list of
        components (e.g. product price, tax, discount, delivery cost,
        delivery tax, bonus, etc.).
    :param start_parameter: Unique deep-linking parameter that can be used
        to generate this invoice when used as a start parameter.
    :param currency: Three-letter ISO 4217 currency code.
    :param photo_url: URL of the product photo for the invoice. Can be a
        photo of the goods or a marketing image for a service. People like
        it better when they see what they are paying for.
    :param need_name: Pass True, if you require the user's full name to
        complete the order.
    :param description: Product description, 1-255 characters.
    :param photo_size: Photo size.
    :param photo_width: Photo width.
    :param photo_height: Photo height.
    :param need_phone_number: Pass True, if you require the user's phone
        number to complete the order.
    :param need_email: Pass True, if you require the user's email to
        complete the order.
    :param need_shipping_address: Pass True, if you require the user's
        shipping address to complete the order.
    :param is_flexible: Pass True, if the final price depends on the
        shipping method.
    :param disable_notification: Sends the message silently. Users will
        receive a notification with no sound.
    :param reply_to_message_id: If the message is a reply, ID of the
        original message.
    :param reply_markup: InlineKeyboardMarkup.
    :return: Message.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def answer_shipping_query(self, shipping_query_id, ok,
                          shipping_options=None, error_message=None):
    """If you sent an invoice requesting a shipping address and the
    parameter is_flexible was specified, the Bot API will send an Update
    with a shipping_query field to the bot. Use this method to reply to
    shipping queries. On success, True is returned.

    :param shipping_query_id: Unique identifier for the query to be
        answered.
    :param ok: Specify True if delivery to the specified address is
        possible and False if there are any problems (for example, if
        delivery to the specified address is not possible).
    :param shipping_options: List of ShippingOption. Required if ok is
        True.
    :param error_message: Required if ok is False. Error message in human
        readable form that explains why it is impossible to complete the
        order (e.g. "Sorry, delivery to your desired address is
        unavailable'). Telegram will display this message to the user.
    :return: True.
    """
    # NOTE(review): unlike most methods in this class the parameters here
    # are positional (no leading *); confirm this asymmetry is intentional.
    pass
@_api_request()
def answer_pre_checkout_query(self, pre_checkout_query_id, ok,
                              error_message=None):
    """Once the user has confirmed their payment and shipping details,
    the Bot API sends the final confirmation in the form of an Update with
    the field pre_checkout_query. Use this method to respond to such
    pre-checkout queries. On success, True is returned. Note: The Bot API
    must receive an answer within 10 seconds after the pre-checkout query
    was sent.

    :param pre_checkout_query_id: Unique identifier for the query to be
        answered.
    :param ok: Specify True if everything is alright (goods are available,
        etc.) and the bot is ready to proceed with the order. Use False if
        there are any problems.
    :param error_message: Required if ok is False. Error message in human
        readable form that explains the reason for failure to proceed with
        the checkout (e.g. "Sorry, somebody just bought the last of our
        amazing black T-shirts while you were busy filling out your payment
        details. Please choose a different color or garment!"). Telegram
        will display this message to the user.
    :return: True.
    """
    # NOTE(review): unlike most methods in this class the parameters here
    # are positional (no leading *); confirm this asymmetry is intentional.
    pass
@_api_request()
def restrict_chat_member(self, *, chat_id, user_id, until_date=None,
                         can_send_messages=None,
                         can_send_media_messages=None,
                         can_send_other_messages=None,
                         can_add_web_page_previews=None):
    """Use this method to restrict a user in a supergroup. The bot must be
    an administrator in the supergroup for this to work and must have the
    appropriate admin rights. Pass True for all boolean parameters to lift
    restrictions from a user. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target supergroup (in the format @supergroupusername).
    :param user_id: Unique identifier of the target user.
    :param until_date: Date when restrictions will be lifted for the user,
        unix time. If user is restricted for more than 366 days or less
        than 30 seconds from the current time, they are considered to be
        restricted forever.
    :param can_send_messages: Pass True, if the user can send text
        messages, contacts, locations and venues.
    :param can_send_media_messages: Pass True, if the user can send audios,
        documents, photos, videos, video notes and voice notes, implies
        can_send_messages.
    :param can_send_other_messages: Pass True, if the user can send
        animations, games, stickers and use inline bots, implies
        can_send_media_messages.
    :param can_add_web_page_previews: Pass True, if the user may add web
        page previews to their messages, implies can_send_media_messages.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def promote_chat_member(self, *, chat_id, user_id, can_change_info=None,
                        can_post_messages=None, can_edit_messages=None,
                        can_delete_messages=None, can_invite_users=None,
                        can_restrict_members=None, can_pin_messages=None,
                        can_promote_members=None):
    """Use this method to promote or demote a user in a supergroup or a
    channel. The bot must be an administrator in the chat for this to work
    and must have the appropriate admin rights. Pass False for all boolean
    parameters to demote a user. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :param user_id: Unique identifier of the target user.
    :param can_change_info: Pass True, if the administrator can change chat
        title, photo and other settings.
    :param can_post_messages: Pass True, if the administrator can create
        channel posts, channels only.
    :param can_edit_messages: Pass True, if the administrator can edit
        messages of other users, channels only.
    :param can_delete_messages: Pass True, if the administrator can delete
        messages of other users.
    :param can_invite_users: Pass True, if the administrator can invite new
        users to the chat.
    :param can_restrict_members: Pass True, if the administrator can
        restrict, ban or unban chat members.
    :param can_pin_messages: Pass True, if the administrator can pin
        messages, supergroups only.
    :param can_promote_members: Pass True, if the administrator can add new
        administrators with a subset of his own privileges or demote
        administrators that he has promoted, directly or indirectly
        (promoted by administrators that were appointed by him).
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def export_chat_invite_link(self, *, chat_id):
    """Use this method to export an invite link to a supergroup or a
    channel. The bot must be an administrator in the chat for this to work
    and must have the appropriate admin rights. Returns exported invite
    link as String on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :return: str.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def set_chat_photo(self, *, chat_id, photo):
    """Use this method to set a new profile photo for the chat. Photos
    can't be changed for private chats. The bot must be an administrator in
    the chat for this to work and must have the appropriate admin rights.
    Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :param photo: Image file.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def set_chat_sticker_set(self, *, chat_id, sticker_set_name):
    """Use this method to set a new group sticker set for a supergroup.
    The bot must be an administrator in the chat for this to work and must
    have the appropriate admin rights. Use the field can_set_sticker_set
    optionally returned in getChat requests to check if the bot can use
    this method. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target supergroup (in the format @supergroupusername).
    :param sticker_set_name: Name of the sticker set to be set as the group
        sticker set.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def delete_chat_sticker_set(self, *, chat_id):
    """Use this method to delete a group sticker set from a supergroup.
    The bot must be an administrator in the chat for this to work and must
    have the appropriate admin rights. Use the field can_set_sticker_set
    optionally returned in getChat requests to check if the bot can use
    this method. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target supergroup (in the format @supergroupusername).
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def delete_chat_photo(self, *, chat_id):
    """Use this method to delete a chat photo. Photos can't be changed for
    private chats. The bot must be an administrator in the chat for this to
    work and must have the appropriate admin rights.
    Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def set_chat_title(self, *, chat_id, title):
    """Use this method to change the title of a chat. Titles can't be
    changed for private chats. The bot must be an administrator in the chat
    for this to work and must have the appropriate admin rights.
    Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :param title: New chat title, 1-255 characters.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def set_chat_description(self, *, chat_id, description=None):
    """Use this method to change the description of a supergroup or a
    channel. The bot must be an administrator in the chat for this to work
    and must have the appropriate admin rights. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :param description: New chat description, 0-255 characters.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def pin_chat_message(self, *, chat_id, message_id,
                     disable_notification=None):
    """Use this method to pin a message in a supergroup. The bot must be an
    administrator in the chat for this to work and must have the
    appropriate admin rights. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :param message_id: Identifier of a message to pin.
    :param disable_notification: Pass True, if it is not necessary to send
        a notification to all group members about the new pinned message.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def unpin_chat_message(self, *, chat_id):
    """Use this method to unpin a message in a supergroup chat. The bot
    must be an administrator in the chat for this to work and must have the
    appropriate admin rights. Returns True on success.

    :param chat_id: Unique identifier for the target chat or username of
        the target channel (in the format @channelusername).
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=StickerSet)
def get_sticker_set(self, *, name):
    """Use this method to get a sticker set. On success, a StickerSet
    object is returned.

    :param name: Name of the sticker set.
    :return: StickerSet.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=File)
def upload_sticker_file(self, *, user_id, png_sticker):
    """Use this method to upload a .png file with a sticker for later use
    in create_new_sticker_set and add_sticker_to_set methods (can be used
    multiple times). Returns the uploaded File on success.

    :param user_id: User identifier of sticker file owner.
    :param png_sticker: PNG image with the sticker, must be up to 512
        kilobytes in size, dimensions must not exceed 512px, and either
        width or height must be exactly 512px.
    :return: File.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def create_new_sticker_set(self, *, user_id, name, title, png_sticker,
                           emojis, contains_masks=None,
                           mask_position=None):
    """Use this method to create new sticker set owned by a user. The bot
    will be able to edit the created sticker set. Returns True on success.

    :param user_id: User identifier of created sticker set owner.
    :param name: Short name of sticker set, to be used in t.me/addstickers/
        URLs (e.g., animals). Can contain only english letters, digits and
        underscores. Must begin with a letter, can't contain consecutive
        underscores and must end in “_by_<bot username>”. <bot_username> is
        case insensitive. 1-64 characters.
    :param title: Sticker set title, 1-64 characters.
    :param png_sticker: Png image with the sticker, must be up to 512
        kilobytes in size, dimensions must not exceed 512px, and either
        width or height must be exactly 512px. Pass a file_id as a string
        to send a file that already exists on the Telegram servers, pass an
        HTTP URL as a string for Telegram to get a file from the Internet,
        or upload a new one using multipart/form-data.
    :param emojis: One or more emoji corresponding to the sticker.
    :param contains_masks: Pass True, if a set of mask stickers should be
        created.
    :param mask_position: MaskPosition instance.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def add_sticker_to_set(self, *, user_id, name, png_sticker, emojis,
                       mask_position=None):
    """Use this method to add a new sticker to a set created by the bot.
    Returns True on success.

    :param user_id: User identifier of sticker set owner.
    :param name: Sticker set name.
    :param png_sticker: PNG image with the sticker, must be up to 512
        kilobytes in size, dimensions must not exceed 512px, and either
        width or height must be exactly 512px. Pass a file_id as a string
        to send a file that already exists on the Telegram servers, pass an
        HTTP URL as a string for Telegram to get a file from the Internet,
        or upload a new one using multipart/form-data.
    :param emojis: One or more emoji corresponding to the sticker.
    :param mask_position: MaskPosition instance.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def set_sticker_position_in_set(self, *, sticker, position):
    """Use this method to move a sticker in a set created by the bot to a
    specific position. Returns True on success.

    :param sticker: File identifier of the sticker.
    :param position: New sticker position in the set, zero-based.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request()
def delete_sticker_from_set(self, *, sticker):
    """Use this method to delete a sticker from a set created by the bot.
    Returns True on success.

    :param sticker: File identifier of the sticker.
    :return: True.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
@_api_request(return_type=Chat)
def get_chat(self, *, chat_id):
    """Use this method to get up to date information about the chat
    (current name of the user for one-on-one conversations, current
    username of a user, group or channel, etc.). Returns a Chat object on
    success.

    :param chat_id: Unique identifier for the target chat or username of
        the target supergroup or channel (in the format @channelusername).
    :return: Chat.
    """
    # No-op body; the _api_request decorator presumably performs the call.
    pass
| {
"repo_name": "artcom-net/pytgram",
"path": "pytgram/api.py",
"copies": "1",
"size": "64747",
"license": "mit",
"hash": -3873275472338315300,
"line_mean": 41.9263925729,
"line_max": 79,
"alpha_frac": 0.6205181283,
"autogenerated": false,
"ratio": 4.545537532476652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5666055660776652,
"avg_score": null,
"num_lines": null
} |
"""APi-mongo.
Infracoders meetup example
"""
from flask import Flask, request
from flask.ext.pymongo import PyMongo
from bson.json_util import dumps
from bson.objectid import ObjectId
import os
app = Flask(__name__)
# MongoDB connection settings come from the environment so the same code can
# run against any MongoDB server.
app.config['MONGO_HOST'] = os.getenv('MONGO_HOST')
# Default to the standard MongoDB port: the original int(os.getenv('MONGO_PORT',))
# raised TypeError (int(None)) whenever MONGO_PORT was unset.
app.config['MONGO_PORT'] = int(os.getenv('MONGO_PORT', 27017))
app.config['MONGO_DBNAME'] = os.getenv('MONGO_DBNAME', 'infratest')
mongo = PyMongo(app, config_prefix='MONGO')
@app.route('/')
def hello_infra():
    """Return the greeting served at the application root."""
    greeting = 'Hello Infracoders!'
    return greeting
@app.route('/entries', methods=['POST'])
def save_entry():
    """Persist a JSON-posted entry ({'body': ..., 'title': ...}) in MongoDB
    and return the new ObjectId as JSON."""
    content = request.get_json() or {}  # tolerate an empty/non-JSON body
    app.logger.debug(content)
    new_object = mongo.db.entries.insert(
        {
            # Default both fields to '' — the original only defaulted
            # 'title', so a payload without 'body' crashed on None.encode().
            'body': content.get('body', '').encode("utf-8"),
            'title': content.get('title', '').encode("utf-8")
        }
    )
    app.logger.debug(new_object)
    return dumps(new_object)
@app.route('/entries/<entry>')
def get_entry(entry):
    """Return the single entry with the given ObjectId, or a 404."""
    document = mongo.db.entries.find_one_or_404({'_id': ObjectId(entry)})
    return dumps(document)
@app.route('/entries', methods=['GET'])
def get_entries():
    """Return every stored entry as a JSON array."""
    cursor = mongo.db.entries.find()
    return dumps(cursor)
@app.route('/_status/healthz', methods=['GET'])
def get_healthz():
    """Liveness probe: report that the app process itself is up."""
    return "ok"
@app.route('/_status/mongo/healthz', methods=['GET'])
def get_mongo_healthz():
    """Readiness probe: verify MongoDB is reachable by running a query."""
    # Serialize exactly once: a pymongo cursor is exhausted after the first
    # dumps(), so the original's second dumps() returned an empty list.
    payload = dumps(mongo.db.entries.find())
    app.logger.debug(payload)
    return payload
if __name__ == '__main__':
    # Bind on all interfaces for container use; debug/threaded for local dev.
    app.run(host='0.0.0.0', port=5000, debug=True, threaded=True)
| {
"repo_name": "ipedrazas/infracoders-meetup",
"path": "api-mongo/api-mongo.py",
"copies": "1",
"size": "1718",
"license": "mit",
"hash": -7909922486783555000,
"line_mean": 22.2162162162,
"line_max": 70,
"alpha_frac": 0.6327124563,
"autogenerated": false,
"ratio": 3.2172284644194757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4349940920719476,
"avg_score": null,
"num_lines": null
} |
# API
from csp.impl.buffers import FixedBuffer, DroppingBuffer, SlidingBuffer
from csp.impl.channels import ManyToManyChannel as Channel, CLOSED
from csp.impl.channels import put_then_callback, take_then_callback
from csp.impl.process import put, take, sleep, alts, stop
from csp.impl.timers import timeout
from csp.impl.select import DEFAULT
import csp.impl.process
from twisted.internet.defer import Deferred
def go(f, *args, **kwargs):
    """Spawn f(*args, **kwargs) as a CSP process and start it."""
    proc = csp.impl.process.Process(f(*args, **kwargs))
    proc.run()
def go_channel(f, *args, **kwargs):
    """Run f(*args, **kwargs) as a process and return a 1-slot channel
    that delivers the process's final value and then closes.

    A CLOSED final value closes the channel without putting anything.
    """
    channel = Channel(1)
    def done(value):
        # Invoked with the process's final value once it finishes.
        if value == CLOSED:
            channel.close()
        else:
            # TODO: Clearly define and test the differences of
            # this vs. signaling closing right away (not after the
            # put is done)
            put_then_callback(channel, value, lambda ok: channel.close())
    process = csp.impl.process.Process(f(*args, **kwargs), done)
    process.run()
    return channel
def go_deferred(f, *args, **kwargs):
    """Run f as a process; return a Deferred fired with its final value."""
    deferred = Deferred()
    proc = csp.impl.process.Process(f(*args, **kwargs), deferred.callback)
    proc.run()
    return deferred
def process_channel(f):
    """Decorator: calling the wrapped generator function spawns it as a
    process and returns a channel yielding its final value."""
    def wrapper(*args, **kwargs):
        return go_channel(f, *args, **kwargs)
    return wrapper
def process_deferred(f):
    """Decorator: calling the wrapped generator function spawns it as a
    process and returns a Deferred of its final value."""
    def wrapper(*args, **kwargs):
        return go_deferred(f, *args, **kwargs)
    return wrapper
def process(f):
    """Decorator: calling the wrapped generator function spawns it as a
    fire-and-forget process."""
    def wrapper(*args, **kwargs):
        return go(f, *args, **kwargs)
    return wrapper
# For API consistency (sort of)
def close(channel):
    """Close *channel* (thin wrapper kept for API symmetry).

    After closing:
    - pending puts are ignored,
    - pending takes are flushed with None,
    - future puts succeed immediately,
    - future takes immediately receive None.
    """
    result = channel.close()
    return result
| {
"repo_name": "ubolonton/twisted-csp",
"path": "csp/__init__.py",
"copies": "1",
"size": "1848",
"license": "epl-1.0",
"hash": -1197618474047451600,
"line_mean": 26.1764705882,
"line_max": 73,
"alpha_frac": 0.66504329,
"autogenerated": false,
"ratio": 3.681274900398406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48463181903984065,
"avg_score": null,
"num_lines": null
} |
# API:
# getSettingsManager()
# SettingsManager.setValue(name, value)
# SettingsManager.getValue(name)
# SettingsManager.registerListener(callbackFunction)
# FIXME:
# 1. Use dictionary or hash table instead of list
# DONE 2. Get rid of pyinotify?
import os
# Module-level singleton instance, managed by getSettingsManager().
settingsManager = None
class SettingsManager:
    """Persist key/value settings in ~/.opengrafik ("name=value" lines)
    and notify registered listeners whenever a value changes."""

    def __init__(self):
        # Settings live in a dotfile in the user's home directory.
        self.settingsFilename = os.path.join(
            os.path.expanduser('~'), '.opengrafik')
        self.checkSettingsFile()
        self.listeners = []
        # config is a list of [name, value] pairs (see FIXME at file top:
        # a dict would give O(1) lookups; file format is unchanged).
        self.config = self.loadSettings()

    def settingsFileModified(self):
        """Notify every registered listener that a setting changed."""
        for listener in self.listeners:
            listener()

    def checkSettingsFile(self):
        """Create an empty settings file if none exists yet."""
        if not os.path.exists(self.settingsFilename):
            open(self.settingsFilename, "w").close()

    def registerListener(self, callback):
        """Register a no-argument callable invoked after each setValue()."""
        self.listeners.append(callback)

    def setValue(self, name, value):
        """Set *name* to *value*, persist to disk and notify listeners."""
        assert name is not None
        assert value is not None
        for entry in self.config:
            if entry[0] == name:
                entry[1] = value
                break
        else:
            # Not found: append a new [name, value] pair.
            self.config.append([name, value])
        self.saveSettings()
        self.settingsFileModified()

    def getValue(self, name):
        """Return the value stored for *name*.

        Raises KeyError (more precise than the old bare Exception, and
        still caught by callers handling Exception) if *name* is unknown.
        """
        assert name is not None
        for entry in self.config:
            if entry[0] == name:
                return entry[1]
        raise KeyError(name)

    def saveSettings(self):
        """Write all settings to disk as "name=value" lines."""
        lines = []
        for entry in self.config:
            if isinstance(entry, list):
                lines.append(entry[0] + "=" + entry[1] + "\n")
        # 'with' guarantees the handle is closed even if a write fails.
        with open(self.settingsFilename, "w") as f:
            f.writelines(lines)

    def loadSettings(self):
        """Read settings from disk into a list of [name, value] pairs."""
        with open(self.settingsFilename, "r") as f:
            data = f.readlines()
        config = []
        for line in data:
            # Strip the trailing newline so save/load round-trips cleanly
            # (the original kept "\n" in every loaded value), and skip
            # blank or malformed lines (the original turned blank lines
            # into [''] entries).
            line = line.rstrip("\n")
            if "=" in line:
                config.append(line.split("=", 1))
        return config
def getSettingsManager():
    """Return the process-wide SettingsManager, creating it on first use."""
    global settingsManager
    # 'is None' identity test instead of the old '== None' comparison.
    if settingsManager is None:
        settingsManager = SettingsManager()
    return settingsManager
| {
"repo_name": "fredmorcos/attic",
"path": "projects/opengrafik/opengrafik_20090719_python_gtk/SettingsManager.py",
"copies": "1",
"size": "1800",
"license": "isc",
"hash": -3519997813803878000,
"line_mean": 18.7802197802,
"line_max": 52,
"alpha_frac": 0.6627777778,
"autogenerated": false,
"ratio": 3.0716723549488054,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8979608362962526,
"avg_score": 0.05096835395725608,
"num_lines": 91
} |
import csv, json
import sqlite3
import os
# Functions
def clean_units(units_text):
if units_text == 'incidents per year' or units_text.lower() == 'count' or units_text == 'persons per square km':
return 'count'
if '%' in units_text or units_text == 'percentage' or 'per 100' in units_text:
return '%'
if '$' in units_text or units_text == 'USD':
return '$'
if 'per 1,000' in units_text:
return 'per 1,000'
if '10,000' in units_text:
return 'per 10,000'
if '100,000' in units_text:
return 'per 100,000'
if units_text == 'thousands':
return '1,000'
if units_text == 'millions':
return '1,000,000'
if units_text == ',000$ USD':
return '$1,000'
if 'km' in units_text:
return 'km'
if units_text == 'years':
return 'time'
if units_text == 'rank':
return 'rank'
if units_text == 'Fraction':
return 'fraction'
if units_text == 'index':
return 'index'
if units_text == 'total':
return 'total'
if units_text == 'uno':
return 'uno'
if units_text == 'both sexes':
return 'both sexes'
return None
# read old database
# Rows from the legacy denormalized SQLite dump are merged into the unified
# export further below.
connection = sqlite3.connect('data/denormalized_db_old.sqlite')
connection.text_factory = str
cursor = connection.cursor()
cursor.execute('SELECT indicator_name, region, period, value, units, dsID FROM dataset_denorm ORDER BY period')
# read new indicator table
# Builds the [indID, indicator_name] lookup pairs used by getIndicatorID().
csvfile = open('data/indicator.csv', 'rb')
creader = csv.reader(csvfile, delimiter=',', quotechar='"')
indid_list = []
for line in creader:
    indid_list.append([line[0], line[1]])
def getIndicatorID(text):
    """Return the indID whose indicator name equals *text*, else None."""
    for ind_id, ind_name in indid_list:
        if ind_name == text:
            return ind_id
    return None
# read country name index (ISO alpha-3 code -> country name), used by
# getRegionName() below
countries = json.load(open('../WFP/regional.json'))
def getRegionName(region):
    """Return the country name for an ISO alpha-3 code, else None."""
    for country in countries:
        if country['alpha-3'] == region:
            return country['name']
    return None
# Create CSV export
# Header row: WFP rows carry subnational admin levels; the SQLite rows are
# country-level only, so their admin columns are filled with 'NA'.
csv_export = [['indicator_name', 'region_name', 'admin1_name', 'admin2_name', 'period', 'value', 'region', 'admin1', 'admin2', 'indID', 'units', 'units_text']]
# read wfp file
wfpFile = open('../WFP/wfp_data.csv', 'rb')
wfpReader = csv.reader(wfpFile, delimiter=',', quotechar='"')
FIRST_LINE = True
for line in wfpReader:
    if FIRST_LINE:
        # skip the WFP header row
        FIRST_LINE = False
    else:
        # all WFP values are percentages
        csv_export.append([line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7], line[8], line[9], '%', 'Percent'])
# add other indicators
for row in cursor:
    indid_name = row[0]
    indid = getIndicatorID(indid_name)
    region = row[1]
    region_name = getRegionName(region)
    period = row[2]
    value = row[3]
    dsID = row[5]
    units_text = row[4]
    units = clean_units(units_text)
    print 'name: '+ indid_name
    print indid
    print units_text
    print units
    # rows with an unknown region, indicator or unit are dropped
    if region_name and indid and units:
        csv_export.append([indid_name, region_name, 'NA', 'NA', period, value, region, 'NA', 'NA', indid, units, units_text])
with open('all_data.csv', 'wb') as csvfile:
dbwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
for one in csv_export:
try:
dbwriter.writerow(one)
except:
print 'ERROR'
csvfile.close()
connection.close()
| {
"repo_name": "xyfeng/ocha-subnational",
"path": "ALL/unify.py",
"copies": "1",
"size": "3044",
"license": "mit",
"hash": 5727683421614516000,
"line_mean": 25.2413793103,
"line_max": 159,
"alpha_frac": 0.664586071,
"autogenerated": false,
"ratio": 2.767272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3931858798272727,
"avg_score": null,
"num_lines": null
} |
import csv, json, operator
Scheme = {
".POP.": "Population",
".ECO.": "Economic",
".HTH.": "Health",
".FOS.": "Food Security",
".NUT.": "Nutrition",
".EDU.": "Education",
".PRO.": "Protection",
".HLP.": "Housing, Land and Property",
".MIN.": "Mine Action",
".CDT.": "Coordination",
".WSH.": "Water, Sanitation and Hygiene",
".SHE.": "Emergency Shelter and NFI",
".LOG.": "Logistics",
".FUN.": "Funding",
".HUM.": "Humanitarian Profile",
".EAR.": "Early Recovery",
".GEN.": "Gender-based violence",
".CAM.": "Camp Coordination / Management",
".TEL.": "Emergency Telecommunications",
".OTH.": "Others"
}
result = {}
# read all data table
csvfile = open('all_data.csv', 'rb')
creader = csv.reader(csvfile, delimiter=',', quotechar='"')
firstLine = True
for line in creader:
if firstLine:
firstLine = False
else:
indid_name = line[0]
print indid_name
indid = line[9]
topic = ''
for key in Scheme:
if key in indid:
topic = Scheme[key]
break
if topic not in result:
result[topic] = [{
'indid': indid,
'name': indid_name
}]
else:
FOUND = False
for one in result[topic]:
if one['indid'] == indid:
FOUND = True
break
if not FOUND:
result[topic].append({
'indid': indid,
'name': indid_name
})
result[topic].sort(key=operator.itemgetter('name'))
csvfile.close()
with open('topics.json', 'w') as outfile:
json.dump(result, outfile)
| {
"repo_name": "xyfeng/ocha-subnational",
"path": "ALL/create_topic.py",
"copies": "1",
"size": "1449",
"license": "mit",
"hash": 2758767299692768000,
"line_mean": 20,
"line_max": 59,
"alpha_frac": 0.5990338164,
"autogenerated": false,
"ratio": 2.5691489361702127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8393869029765899,
"avg_score": 0.05486274456086277,
"num_lines": 69
} |
"""API
This module contains functions to interact with different system entities
"""
import os
from base64 import b64decode, b64encode
from hashlib import sha256
from librekpi.model import *
def get_salt(self):
    """Generate (if needed) and return the object's Base64-encoded salt.

    A new cryptographically random 8-byte salt is created only for unsaved
    objects (no ``id``) that do not already carry one. The stored value is
    always returned as UTF-8 bytes.
    """
    if not (self.id or self._salt):
        self._salt = b64encode(os.urandom(8))
    if isinstance(self._salt, str):
        self._salt = self._salt.encode('UTF-8')
    return self._salt
def encrypt_password(password, salt):
    """Hash *password* together with *salt* using SHA-256.

    :param password: clear-text password supplied by the user; accepted as
                     either ``str`` (encoded as UTF-8) or bytes.
    :param salt: cryptographically random bytes used to harden the hash
                 against dictionary attacks.
    :returns: the hex digest as a native ``str``.
    """
    raw = password.encode("UTF-8") if isinstance(password, str) else password
    # sha256(a + b) is equivalent to update(a) followed by update(b)
    digest = sha256(raw + salt).hexdigest()
    if not isinstance(digest, str):
        digest = digest.decode("UTF-8")
    return digest
def validate_password(self, password):
    """Check the password against existing credentials.

    :type password: str
    :param password: clear text password
    :rtype: bool
    """
    # Re-hash the candidate with the stored (Base64-decoded) salt and
    # compare against the stored password hash.
    return self._password == encrypt_password(password,
                                              b64decode(str(self._salt)))
def create_user(writer, **kwargs):
    """Create a ``User`` from **kwargs and pass its public fields to *writer*.

    The password is salted and hashed before saving; the credential
    internals ('_salt', '_password') are excluded from the callback data.
    """
    def user_create_callback(student):
        # Collect every mapped field except the credential internals.
        st = {}
        for field in student._reverse_db_field_map:
            if field not in ['_salt', '_password']:
                st[field] = student.get_field_value(field)
        writer(st)
    student = User(**kwargs)
    # get_salt populates student._salt for the fresh (unsaved) object
    get_salt(student)
    student._password = encrypt_password(student.password,
                                         b64decode(str(student._salt)))
    student.save(user_create_callback)
def authenticate_user(writer, **kwargs):
    """Authenticate a user by email/password and emit the public fields.

    :param writer: callback that receives a dict of the user's public
                   (non-credential) fields on success.
    :param kwargs: must contain 'email' and 'password'.
    :raises IOError: when no user matches or the password is wrong.
    """
    def user_auth_callback(student):
        try:
            student = student[0]
        except (IndexError, KeyError):
            # No matching user. (The original caught only KeyError, but
            # indexing an empty result list raises IndexError.)
            raise IOError
        if validate_password(student, kwargs['password']):
            # Emit every mapped field except the credential internals.
            st = {}
            for field in student._reverse_db_field_map:
                if field not in ['_salt', '_password']:
                    st[field] = student.get_field_value(field)
            writer(st)
        else:
            raise IOError
    student = User.objects \
        .limit(1) \
        .filter(email=kwargs['email']) \
        .find_all(callback=user_auth_callback)
# The functions below are placeholders for the rest of the API surface;
# none are implemented yet.

def logout_user(**kwargs):
    """Log a user out (not implemented)."""
    pass

def create_university(**kwargs):
    """Create a university record (not implemented)."""
    pass

def get_universities(**kwargs):
    """List universities (not implemented)."""
    pass

def create_group(**kwargs):
    """Create a group (not implemented)."""
    pass

def get_groups(**kwargs):
    """List groups (not implemented)."""
    pass

def create_class(**kwargs):
    """Create a class (not implemented)."""
    pass

def create_timetable(**kwargs):
    """Create a timetable (not implemented)."""
    pass

def create_comment(**kwargs):
    """Create a comment (not implemented)."""
    pass
| {
"repo_name": "LibreKPI/librekpi",
"path": "src/librekpi/api.py",
"copies": "1",
"size": "3493",
"license": "mit",
"hash": 4915429001104848000,
"line_mean": 23.95,
"line_max": 79,
"alpha_frac": 0.6097910106,
"autogenerated": false,
"ratio": 4.296432964329643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018618797361041185,
"num_lines": 140
} |
"""API
This module contains functions to interact with different system entities
"""
import os
from base64 import b64decode, b64encode
from hashlib import sha256
from scheduler.model import *
def get_salt(self):
    """Generate (if needed) and return the object's Base64-encoded salt.

    A fresh cryptographically random 8-byte salt is created only for
    unsaved objects (no ``id``) without an existing salt; the result is
    always returned as UTF-8 bytes.
    """
    if not (self.id or self._salt):
        self._salt = b64encode(os.urandom(8))
    if isinstance(self._salt, str):
        self._salt = self._salt.encode('UTF-8')
    return self._salt
def encrypt_password(password, salt):
    """Hash *password* combined with *salt* via SHA-256.

    :param password: clear-text password (``str`` is UTF-8 encoded first,
                     bytes are used as-is).
    :param salt: cryptographically random bytes strengthening the hash
                 against dictionary attacks.
    :returns: hex digest as a native ``str``.
    """
    raw = password.encode("UTF-8") if isinstance(password, str) else password
    # hashing the concatenation equals sequential update() calls
    digest = sha256(raw + salt).hexdigest()
    if not isinstance(digest, str):
        digest = digest.decode("UTF-8")
    return digest
def validate_password(self, password):
    """Check the password against existing credentials.

    :type password: str
    :param password: clear text password
    :rtype: bool
    """
    # Hash the candidate with the stored (Base64-decoded) salt and compare
    # against the stored password hash.
    return self._password == encrypt_password(password,
                                              b64decode(str(self._salt)))
def create_user(writer, **kwargs):
    """Create a ``User`` from **kwargs and pass its public fields to *writer*.

    The password is salted and hashed before saving; the credential
    internals ('_salt', '_password') are excluded from the callback data.
    """
    def user_create_callback(student):
        # Collect every mapped field except the credential internals.
        st = {}
        for field in student._reverse_db_field_map:
            if field not in ['_salt', '_password']:
                st[field] = student.get_field_value(field)
        writer(st)
    student = User(**kwargs)
    # get_salt populates student._salt for the fresh (unsaved) object
    get_salt(student)
    student._password = encrypt_password(student.password,
                                         b64decode(str(student._salt)))
    student.save(user_create_callback)
def authenticate_user(writer, **kwargs):
    """Authenticate a user by email/password and emit the public fields.

    :param writer: callback that receives a dict of the user's public
                   (non-credential) fields on success.
    :param kwargs: must contain 'email' and 'password'.
    :raises IOError: when no user matches or the password is wrong.
    """
    def user_auth_callback(student):
        try:
            student = student[0]
        except (IndexError, KeyError):
            # No matching user. The original caught only KeyError, but an
            # empty result list raises IndexError; it also left a live
            # `import ipdb; ipdb.set_trace()` debugger call here, which
            # has been removed.
            raise IOError
        if validate_password(student, kwargs['password']):
            # Emit every mapped field except the credential internals.
            st = {}
            for field in student._reverse_db_field_map:
                if field not in ['_salt', '_password']:
                    st[field] = student.get_field_value(field)
            writer(st)
        else:
            raise IOError
    student = User.objects \
        .limit(1) \
        .filter(email=kwargs['email']) \
        .find_all(callback=user_auth_callback)
# The functions below are placeholders for the rest of the API surface;
# none are implemented yet.

def logout_user(**kwargs):
    """Log a user out (not implemented)."""
    pass

def create_university(**kwargs):
    """Create a university record (not implemented)."""
    pass

def get_universities(**kwargs):
    """List universities (not implemented)."""
    pass

def create_group(**kwargs):
    """Create a group (not implemented)."""
    pass

def get_groups(**kwargs):
    """List groups (not implemented)."""
    pass

def create_class(**kwargs):
    """Create a class (not implemented)."""
    pass

def create_timetable(**kwargs):
    """Create a timetable (not implemented)."""
    pass

def create_comment(**kwargs):
    """Create a comment (not implemented)."""
    pass
| {
"repo_name": "kyiv-team-hacktbilisi/web-app",
"path": "src/scheduler/api.py",
"copies": "1",
"size": "3493",
"license": "mit",
"hash": 2298353069493485300,
"line_mean": 23.95,
"line_max": 79,
"alpha_frac": 0.6100772975,
"autogenerated": false,
"ratio": 4.3176761433868975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018666994777659656,
"num_lines": 140
} |
import csv, json
import sqlite3
import os

# Create the database
connection = sqlite3.connect('wfp_data.sqlite')
connection.text_factory = str
cursor = connection.cursor()

# Create the table
cursor.execute('DROP TABLE IF EXISTS wfp')
cursor.execute('CREATE TABLE wfp ( indicator_name text, region_name text, admin1_name text, admin2_name text, period text, value real, region text, admin1 text, admin2 text, indID text, units text) ')
connection.commit()

# Create CSV export (header row; data rows are appended further below)
csv_export = [['indicator_name', 'region_name', 'admin1_name', 'admin2_name', 'period', 'value', 'region', 'admin1', 'admin2', 'indID', 'units']]

# Load the indicator metadata. Use a with-block so the handle is closed;
# the original opened it into 'csvfile' and leaked it when rebinding.
indicators = []
with open('indicator.csv', 'rb') as indicator_file:
    for row in csv.reader(indicator_file, delimiter=',', quotechar='"'):
        indicators.append(row)

# value.csv stays open: 'creader' is consumed by the main loop below and
# the handle is closed explicitly at the end of the script.
csvfile = open('value.csv', 'rb')
creader = csv.reader(csvfile, delimiter=',', quotechar='"')

# Close the JSON handles promptly instead of leaking them.
with open('regional.json') as f:
    countries = json.load(f)
with open('../FAO/fao_state.json') as f:
    states = json.load(f)
with open('../FAO/fao_city.json') as f:
    cities = json.load(f)
def find_state_list(country):
    """Return the FAO state records whose codes appear under the country dir."""
    codes = [int(f) for f in os.listdir('../FAO/country/' + country)
             if f != '.DS_Store' and '.json' not in f]
    found = []
    for code in codes:
        # take the first state record with a matching code, skip otherwise
        match = next((s for s in states if s['code'] == code), None)
        if match is not None:
            found.append(match)
    return found
def find_city_list(country, state):
    """Return the FAO city records for files under the country/state dir."""
    codes = [int(f.replace('.json', ''))
             for f in os.listdir('../FAO/country/' + country + '/' + str(state))
             if f != '.DS_Store']
    found = []
    for code in codes:
        # take the first city record with a matching code, skip otherwise
        match = next((c for c in cities if c['code'] == code), None)
        if match is not None:
            found.append(match)
    return found
def find_state_code(country, name):
state_list = find_state_list(country)
for s in state_list:
if s['name'].encode('UTF-8') == name:
return s['code']
for s in state_list:
if name in s['name'].encode('UTF-8'):
print 'looking for: ' + name
print 'found: ' + s['name']
return s['code']
# for s in state_list:
# if s['name'].encode('UTF-8') in name:
# print 'found: ' + s['name']
# return s['code']
return 0
def find_city_code(country, state, name):
    """Resolve a city *name* to its FAO code, or 0 when not found."""
    for city in find_city_list(country, state):
        if city['name'].encode('UTF-8') == name:
            return city['code']
    return 0
# print find_state_list('RWA')
# Iterate through the CSV reader, inserting values into the database
skiptitle = True
for row in creader:
    if skiptitle:
        # skip the header row
        skiptitle = False
    else:
        region = row[0]
        # NOTE(review): if no country matches, region_name keeps its value
        # from the previous row — TODO confirm this is intended.
        for one in countries:
            if one['alpha-3'] == region:
                region_name = one['name']
        admin1 = row[1]
        admin1_name = 'NA'
        if admin1 != 'NA':
            # print admin1
            code = find_state_code(region, admin1)
            if code != 0:
                admin1 = code
                for one in states:
                    if one['code'] == admin1:
                        admin1_name = one['name']
            else:
                # NOTE(review): indentation was lost in extraction; this
                # reading pairs the else with `if code != 0`, i.e. rows
                # whose admin1 cannot be resolved are skipped — confirm
                # against the original file.
                continue
        admin2 = row[2]
        admin2_name = 'NA'
        if admin2 != 'NA':
            code = find_city_code(region, admin1, admin2)
            if code != 0:
                admin2 = code
                for one in cities:
                    if one['code'] == admin2:
                        admin2_name = one['name']
            else:
                # same skip-on-unresolved reading as for admin1 above
                continue
        period = row[3]
        indID = row[5]
        value = float(row[6])
        indicator_name = ''
        units = ''
        # look up the indicator's display name and units by id
        for one in indicators:
            if one[0] == indID:
                indicator_name = one[1]
                units = one[2]
                break
        cursor.execute('INSERT INTO wfp VALUES (?,?,?,?,?,?,?,?,?,?,?)', (indicator_name, region_name, admin1_name, admin2_name, period, value, region, admin1, admin2, indID, units))
        csv_export.append([indicator_name, region_name, admin1_name, admin2_name, period, value, region, admin1, admin2, indID, units])
# Close the csv file, commit changes, and close the connection
csvfile.close()
connection.commit()
connection.close()
# Write the CSV mirror of the table
with open('wfp_data.csv', 'wb') as csvfile:
    dbwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    for one in csv_export:
        dbwriter.writerow(one)
| {
"repo_name": "xyfeng/ocha-subnational",
"path": "WFP/create_db.py",
"copies": "1",
"size": "3969",
"license": "mit",
"hash": 8882535707084702000,
"line_mean": 28.4,
"line_max": 200,
"alpha_frac": 0.6409674981,
"autogenerated": false,
"ratio": 2.8451612903225807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8740310398329201,
"avg_score": 0.04916367801867619,
"num_lines": 135
} |
# Stand-ins so the sketch below parses; the real base classes don't exist yet.
Api = object  # Base API class
Event = object  # A simple event/signal system
############
# LONG FORM: Separate classes for definition & implementation allow
# putting definitions in a common python package available
# across all apps
############
# client.py
class AuthApi(Api):
    """Client-side definition of the Auth API: events plus method signatures.

    NOTE(review): ``Api`` and ``Event`` are ``object`` placeholders in this
    sketch, and ``as_client()`` is hypothetical — this module is a design
    exploration, not runnable code.
    """

    user_registered = Event()
    user_account_closed = Event()

    def get_user(self, username: str) -> dict:
        pass

    def check_password(self, password: str) -> bool:
        pass

api = AuthApi.as_client()
# server.py
class AuthImplementation(AuthApi):  # Inherits from client definition
    """Server-side implementation of the Auth API defined in AuthApi."""

    def get_user(self, username: str) -> dict:
        # Actual implementation
        return {"name": "Test User", "email": "test@example.com"}

    def check_password(self, password: str) -> bool:
        # NOTE(review): return annotation corrected from ``dict`` to
        # ``bool`` to match AuthApi.check_password and the actual value.
        return password == "Passw0rd!"

api = AuthImplementation.as_server()
##############
# ALTERNATIVE: Can combine both definitions if separation is not required.
##############
# client_server.py
class AuthImplementation(Api):
    """Combined client/server definition, for when separation isn't needed."""

    user_registered = Event()
    user_account_closed = Event()

    def get_user(self, username: str) -> dict:
        # Actual implementation
        return {"name": "Test User", "email": "test@example.com"}

    def check_password(self, password: str) -> bool:
        # NOTE(review): return annotation corrected from ``dict`` to
        # ``bool`` to match the actual value returned.
        return password == "Passw0rd!"

client = AuthImplementation.as_client()
server = AuthImplementation.as_server()
# Pros:
# - Personal preference: I find this more readable
# - IDE's will warn about definition/implementation signatures not matching
# - Makes our implementation different(/easier?)
# - Has the option of being DRY where client/server separation is not required
# Cons:
# - Not DRY in it's long form
# - Forcing an OO design
#######################
# Additional thoughts #
#######################
# We could have a top level apis.py, much like Django's urls.py:
# /apis.py
# NOTE(review): SparePartsApi, CustomersApi and MetricsApi are illustrative
# only and are not defined anywhere in this sketch.
apis = [
    SparePartsApi.as_server(),  # This is the spare parts application, so serve its API
    AuthApi.as_client(),  # We need the Auth API in order to authenticate clients
    CustomersApi.as_client(only=["support_ticket_opened"]),  # Select only certain events
    MetricsApi.as_client(
        exclude=["page_view"]
    ),  # Filter out high-volume events we don't care about
]
# Warren would be able to read this list of APIs and setup the necessary AMQP bindings.
# Each API gets its own queue to avoid high activity on one API blocking all others.
| {
"repo_name": "adamcharnock/lightbus",
"path": "lightbus_experiments/potential_api_oo.py",
"copies": "1",
"size": "2488",
"license": "apache-2.0",
"hash": -1530005384090367700,
"line_mean": 26.0434782609,
"line_max": 89,
"alpha_frac": 0.6551446945,
"autogenerated": false,
"ratio": 3.792682926829268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4947827621329268,
"avg_score": null,
"num_lines": null
} |
"""API package."""
from __future__ import absolute_import
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
def _empty(value):
"""Check if value is empty and need to be removed."""
return (value is None or
value is False or
value == {} or
value == [])
def normalize(rsrc):
    """Returns normalized representation of the resource.

    - all null attributes are removed recursively.
    - all null array members are removed.
    """
    if isinstance(rsrc, dict):
        return normalize_dict(rsrc)
    if isinstance(rsrc, list):
        return normalize_list(rsrc)
    return rsrc
def normalize_dict(rsrc):
    """Normalize dict: drop empty values, then normalize the remainder."""
    return {key: normalize(value)
            for key, value in rsrc.iteritems()
            if not _empty(value)}
def normalize_list(rsrc):
    """Normalize list: drop empty members, then normalize the remainder."""
    kept = (item for item in rsrc if not _empty(item))
    return [normalize(item) for item in kept]
| {
"repo_name": "toenuff/treadmill",
"path": "lib/python/treadmill/api/__init__.py",
"copies": "1",
"size": "1030",
"license": "apache-2.0",
"hash": 3566220777943222000,
"line_mean": 22.9534883721,
"line_max": 79,
"alpha_frac": 0.6087378641,
"autogenerated": false,
"ratio": 4.17004048582996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5278778349929959,
"avg_score": null,
"num_lines": null
} |
"""API package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
def _empty(value):
"""Check if value is empty and need to be removed."""
return (value is None or
value is False or
value == {} or
value == [])
def normalize(rsrc):
    """Returns normalized representation of the resource.

    - all null attributes are removed recursively.
    - all null array members are removed.
    """
    if isinstance(rsrc, dict):
        return normalize_dict(rsrc)
    if isinstance(rsrc, list):
        return normalize_list(rsrc)
    return rsrc
def normalize_dict(rsrc):
    """Normalize dict: drop empty values, then normalize the remainder."""
    return {key: normalize(value)
            for key, value in rsrc.items()
            if not _empty(value)}
def normalize_list(rsrc):
    """Normalize list: drop empty members, then normalize the remainder."""
    kept = (item for item in rsrc if not _empty(item))
    return [normalize(item) for item in kept]
| {
"repo_name": "captiosus/treadmill",
"path": "treadmill/api/__init__.py",
"copies": "1",
"size": "1134",
"license": "apache-2.0",
"hash": -7909260669956667000,
"line_mean": 22.625,
"line_max": 75,
"alpha_frac": 0.6199294533,
"autogenerated": false,
"ratio": 4.123636363636364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5243565816936364,
"avg_score": null,
"num_lines": null
} |
"""API package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from treadmill import authz as authz_mod
from treadmill.journal import plugin as jplugin
from treadmill import journal
def _empty(value):
"""Check if value is empty and need to be removed."""
return (value is None or
value is False or
value == {} or
value == [])
def normalize(rsrc):
    """Returns normalized representation of the resource.

    - all null attributes are removed recursively.
    - all null array members are removed.
    """
    if isinstance(rsrc, dict):
        return normalize_dict(rsrc)
    if isinstance(rsrc, list):
        return normalize_list(rsrc)
    return rsrc
def normalize_dict(rsrc):
    """Normalize dict: drop empty values, then normalize the remainder."""
    return {
        key: normalize(value)
        for key, value in six.iteritems(rsrc)
        if not _empty(value)
    }
def normalize_list(rsrc):
    """Normalize list: drop empty members, then normalize the remainder."""
    kept = (item for item in rsrc if not _empty(item))
    return [normalize(item) for item in kept]
class Context:
    """API context bundling an authorizer and a journaler."""

    def __init__(self, authorizer=None, journaler=None):
        # Fall back to the null implementations when nothing is supplied.
        if authorizer is None:
            authorizer = authz_mod.NullAuthorizer()
        if journaler is None:
            journaler = jplugin.NullJournaler()
        self.authorizer = authorizer
        self.journaler = journaler

    def build_api(self, api_cls, kwargs=None):
        """ build api with decoration """
        kwargs = kwargs or {}
        api = api_cls(**kwargs)
        journaled = journal.wrap(api, self.journaler)
        return authz_mod.wrap(journaled, self.authorizer)
| {
"repo_name": "ceache/treadmill",
"path": "lib/python/treadmill/api/__init__.py",
"copies": "2",
"size": "1904",
"license": "apache-2.0",
"hash": 4243037307308888600,
"line_mean": 23.1012658228,
"line_max": 65,
"alpha_frac": 0.5850840336,
"autogenerated": false,
"ratio": 4.166301969365427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5751386002965426,
"avg_score": null,
"num_lines": null
} |
''' API planning for a hypothetical auto-fixturing library.
Some thoughts:
+ What about defining the interfaces somewhere as an ABC-esque metaclass
or something? That would let us define the public API somewhere
standardized.
+ What about bidirectional interface spec? As in, for test generation,
how do you make sure that the required external APIs from other
components get called appropriately?
+ Can do the decorators without substantial performance hit by just
adding fixture() function as an attribute to the method object. BUT,
that requires a different method name!
Actually, can set anything as attr, and decorator can return the
original object. IE,
@interface
def func(self, *args, **kwargs):
pass
func.__interface__ = True
func.fixture = decorator
func.__fixture__ = fixture
func.spec = decorator
func.__spec__ = spec
but both fixture and spec return the same func object, just with their
respective attributes filled in.
------
Furthermore, you don't want to define your ABC/interface in a separate
place. Yes, you may want to enforce interface consistency, but that's a
process control question, not a coding problem.
'''
from .hypothetical import API
from .hypothetical import public_api
from .hypothetical import fixture_api
from .hypothetical import noop_fixture
class ComponentWithFixturableAPI(metaclass=API):
    ''' This is a component that we would like to automatically generate
    fixtures for. We would also like to have testing be some degree of
    automated.

    NOTE(review): everything here relies on the hypothetical ``API``
    metaclass and its decorators; this class is a design sketch, not
    runnable code.
    '''

    @public_api
    def __init__(self, *args, **kwargs):
        ''' Do the standard init for the object.
        '''

    @__init__.fixture
    def __init__(self, *args, **kwargs):
        ''' This is converted into the __init__ method for the fixture.
        It is not required.
        '''

    @fixture_api
    def some_fixture_method(self, *args, **kwargs):
        ''' The @fixture_api decorator converts this into something that
        is only available to the fixture (and not the normal object).
        Use it for testing-specific code, like resetting the fixture to
        its pristine state.
        '''

    @public_api
    def no_fixture_method_here(self, *args, **kwargs):
        ''' This is a normal method that needs no fixture.
        '''

    @noop_fixture
    @public_api
    def noop_fixture_here(self, *args, **kwargs):
        ''' This is a normal method that needs no fixture.
        '''

    @public_api
    def some_method_here(self, *args, **kwargs):
        ''' This is the normal call for a fixtured method. When it is
        called, actual code runs.
        '''

    @some_method_here.fixture
    def some_method_here(self, *args, **kwargs):
        ''' This is the fixtured method. When it is called, fixture code
        runs. It will not exist in the object class, and will only be
        available through cls.__fixture__().some_method_here().
        '''

    @some_method_here.interface
    def some_method_here(self, *args, **kwargs):
        ''' This defines the ABC. It's automatically injected into the
        cls.__interface__() object as an abc.abstractmethod, but
        additionally, it can define functions that must be called from
        within the method.
        '''
# To use it, use a metaclass-injected classmethod:
# (NOTE(review): __fixture__/__interface__ are supplied by the hypothetical
# API metaclass and do not exist yet.)
# This creates a fixture, for use in testing
component_fixture = ComponentWithFixturableAPI.__fixture__()
# This creates an abc-like-spec, for use in GIT process control / commit checks
component_interface = ComponentWithFixturableAPI.__interface__()
''' Notes on interfaces!
+ @interface can inject another __attr__ on top of abc.abstractmethod.
+ The actul interface class should probably use a subclass of
abc.ABCMeta as a metaclass.
'''
| {
"repo_name": "Badg/py_hypergolix",
"path": "docs/hypothetical_scratchpad.py",
"copies": "2",
"size": "3811",
"license": "unlicense",
"hash": -6264513762986390000,
"line_mean": 32.7256637168,
"line_max": 79,
"alpha_frac": 0.6775124639,
"autogenerated": false,
"ratio": 4.243875278396437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01671583087512291,
"num_lines": 113
} |
"""API properties.
"""
from __future__ import print_function
from __future__ import unicode_literals
from tabulate import tabulate, tabulate_formats, simple_separated_format
from platform import python_version_tuple
from common import SkipTest
try:
if python_version_tuple() >= ('3','3','0'):
from inspect import signature, _empty
else:
from funcsigs import signature, _empty
except ImportError:
signature = None
_empty = None
def test_tabulate_formats():
    """API: tabulate_formats is a list of strings."""
    fmts = tabulate_formats
    print("tabulate_formats = %r" % fmts)
    assert type(fmts) is list
    for fmt in fmts:
        assert type(fmt) is type("")
def _check_signature(function, expected_sig):
    """Assert that *function*'s signature matches (name, default) pairs.

    Skips the test when no ``signature`` implementation is available.
    """
    if not signature:
        raise SkipTest()
    actual_sig = signature(function)
    print("expected: %s\nactual: %s\n" % (expected_sig, str(actual_sig)))
    # zip() silently truncates the longer sequence, so a signature with
    # extra or missing parameters would pass; require equal counts first.
    assert len(expected_sig) == len(actual_sig.parameters)
    for (e, ev), (a, av) in zip(expected_sig, actual_sig.parameters.items()):
        assert e == a and ev == av.default
def test_tabulate_signature():
    """API: tabulate() type signature is unchanged."""
    assert type(tabulate) is type(lambda: None)
    expected = [
        ("tabular_data", _empty),
        ("headers", ()),
        ("tablefmt", "simple"),
        ("floatfmt", "g"),
        ("numalign", "decimal"),
        ("stralign", "left"),
        ("missingval", ""),
    ]
    _check_signature(tabulate, expected)
def test_simple_separated_format_signature():
    """API: simple_separated_format() type signature is unchanged."""
    assert type(simple_separated_format) is type(lambda: None)
    _check_signature(simple_separated_format, [("separator", _empty)])
| {
"repo_name": "kyokley/tabulate",
"path": "test/test_api.py",
"copies": "1",
"size": "1802",
"license": "mit",
"hash": -2111069642404786000,
"line_mean": 30.6140350877,
"line_max": 77,
"alpha_frac": 0.6254162042,
"autogenerated": false,
"ratio": 3.9431072210065645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5068523425206564,
"avg_score": null,
"num_lines": null
} |
""" ApiProperties """
#
# g_properties
#
def g_properties(group_uri):
    """Return the v2 API endpoint property map for a group type.

    :param group_uri: URI path segment for the group type
                      (e.g. ``adversaries``).
    :returns: dict mapping action name to its request properties
              (``http_method``, ``owner_allowed``, ``pagination``, ``uri``),
              plus a ``filters`` list of supported filter names.
    """
    base = '/v2/groups/' + group_uri

    def _prop(method, owner, paginated, uri):
        # One endpoint descriptor.
        return {
            'http_method': method,
            'owner_allowed': owner,
            'pagination': paginated,
            'uri': uri,
        }

    return {
        'add': _prop('POST', True, False, base),
        # {0} = group id
        'association_groups': _prop('GET', True, True, base + '/{0}/groups'),
        # {0} = group id, {1} = group type, {2} = group id
        'association_group_add': _prop('POST', True, False, base + '/{0}/groups/{1}/{2}'),
        'association_group_delete': _prop('DELETE', True, False, base + '/{0}/groups/{1}/{2}'),
        'association_indicators': _prop('GET', True, True, base + '/{0}/indicators'),
        # {0} = indicator type, {1} = indicator value, {2} = group id
        'association_indicator_add': _prop('POST', True, False, '/v2/indicators/{0}/{1}/groups/' + group_uri + '/{2}'),
        'association_indicator_delete': _prop('DELETE', True, False, '/v2/indicators/{0}/{1}/groups/' + group_uri + '/{2}'),
        'association_victims': _prop('GET', True, True, base + '/{0}/victims'),
        # {0} = group id, {1} = victim id
        'association_victim_add': _prop('POST', True, False, base + '/{0}/victims/{1}'),
        'association_victim_delete': _prop('DELETE', True, False, base + '/{0}/victims/{1}'),
        'attributes': _prop('GET', True, False, base + '/{0}/attributes'),
        'attribute_add': _prop('POST', True, False, base + '/{0}/attributes'),
        # {0} = group id, {1} = attribute id
        'attribute_delete': _prop('DELETE', True, False, base + '/{0}/attributes/{1}'),
        'attribute_update': _prop('PUT', True, False, base + '/{0}/attributes/{1}'),
        'base': _prop('GET', True, True, base),
        'delete': _prop('DELETE', True, False, base + '/{0}'),
        # {0} = document / signature id
        'document_download': _prop('GET', True, False, '/v2/groups/documents/{0}/download'),
        'document_upload': _prop('POST', True, False, '/v2/groups/documents/{0}/upload'),
        'filters': [
            'add_adversary_id',
            'add_email_id',
            'add_document_id',
            'add_id',
            'add_incident_id',
            'add_indicator',
            'add_security_label',
            'add_signature_id',
            'add_threat_id',
            'add_tag',
            'add_victim_id',
            # post filters
            'add_pf_name',
            'add_pf_date_added',
            'add_pf_file_type',
        ],
        # {0} = group type, {1} = group id
        'groups': _prop('GET', False, True, '/v2/groups/{0}/{1}/groups/' + group_uri),
        'id': _prop('GET', True, False, base + '/{0}'),
        'indicators': _prop('GET', True, True, '/v2/indicators/{0}/{1}/groups/' + group_uri),
        'signature_download': _prop('GET', True, False, '/v2/groups/signatures/{0}/download'),
        'signature_upload': _prop('POST', True, False, '/v2/groups/signatures/{0}/upload'),
        # {0} = group id, {1} = security label
        'security_label_add': _prop('POST', True, True, base + '/{0}/securityLabels/{1}'),
        'security_label_delete': _prop('DELETE', True, True, base + '/{0}/securityLabels/{1}'),
        'security_label_load': _prop('GET', False, True, base + '/{0}/securityLabels'),
        'security_labels': _prop('GET', True, True, '/v2/securityLabels/{0}/groups/' + group_uri),
        # {0} = group id, {1} = tag name
        'tag_add': _prop('POST', True, False, base + '/{0}/tags/{1}'),
        'tag_delete': _prop('DELETE', True, False, base + '/{0}/tags/{1}'),
        'tags': _prop('GET', True, True, '/v2/tags/{0}/groups/' + group_uri),
        'tags_load': _prop('GET', False, True, base + '/{0}/tags'),
        'update': _prop('PUT', True, False, base + '/{0}'),
        # {0} = victim id
        'victims': _prop('GET', False, True, '/v2/victims/{0}/groups/' + group_uri),
    }
#
# i_properties
#
def i_properties(indicator_uri):
    """Return the API endpoint property map for one indicator resource type.

    Args:
        indicator_uri (str): URI segment naming the indicator type, e.g.
            'addresses', 'emailAddresses', 'files', 'hosts' or 'urls'.

    Returns:
        dict: maps an action name to a dict with 'http_method',
        'owner_allowed', 'pagination' and 'uri' keys; the special key
        'filters' maps to a list of supported filter method names instead.
        File indicators additionally get fileOccurrence endpoints and host
        indicators a dnsResolutions endpoint.
    """
    # Shared prefix for endpoints addressed by indicator value; the original
    # repeated this concatenation in every entry (and once appended a
    # pointless empty string).
    base_uri = '/v2/indicators/' + indicator_uri
    properties = {
        'add': {
            'http_method': 'POST',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri,
        },
        'association_groups': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': True,
            'uri': base_uri + '/{0}/groups',  # indicator value
        },
        'association_group_add': {
            'http_method': 'POST',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/groups/{1}/{2}',  # indicator value, group type, group id
        },
        'association_group_delete': {
            'http_method': 'DELETE',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/groups/{1}/{2}',  # indicator value, group type, group id
        },
        'association_indicators': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': True,
            'uri': base_uri + '/{0}/indicators',  # indicator value
        },
        'association_victims': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': True,
            'uri': base_uri + '/{0}/victims',  # indicator value
        },
        'attributes': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/attributes',  # indicator value
        },
        'attribute_add': {
            'http_method': 'POST',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/attributes',  # indicator value
        },
        'attribute_delete': {
            'http_method': 'DELETE',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/attributes/{1}',  # indicator value, attribute id
        },
        'attribute_update': {
            'http_method': 'PUT',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/attributes/{1}',  # indicator value, attribute id
        },
        'base': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': True,
            'uri': base_uri,
        },
        'delete': {
            'http_method': 'DELETE',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}',  # indicator value
        },
        'filters': [
            'add_adversary_id',
            'add_document_id',
            'add_email_id',
            'add_incident_id',
            'add_indicator',
            'add_security_label',
            # 'add_signature_id',
            'add_tag',
            'add_threat_id',
            'add_victim_id',
            # post filters
            'add_pf_attribute',
            'add_pf_confidence',
            'add_pf_date_added',
            'add_pf_last_modified',
            'add_pf_rating',
            'add_pf_tag',
            'add_pf_threat_assess_confidence',
            'add_pf_threat_assess_rating',
            'add_pf_type'],
        'groups': {
            'http_method': 'GET',
            'owner_allowed': False,
            'pagination': True,
            'uri': '/v2/groups/{0}/{1}/indicators/' + indicator_uri  # group type, group value
        },
        'indicator': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}',  # indicator value
        },
        'id': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}',  # indicator value
        },
        'security_label_add': {
            'http_method': 'POST',
            'owner_allowed': True,
            'pagination': True,
            'uri': base_uri + '/{0}/securityLabels/{1}',  # indicator value, security label
        },
        'security_label_delete': {
            'http_method': 'DELETE',
            'owner_allowed': True,
            'pagination': True,
            'uri': base_uri + '/{0}/securityLabels/{1}',  # indicator value, security label
        },
        'security_label_load': {
            'http_method': 'GET',
            'owner_allowed': False,
            'pagination': True,
            'uri': base_uri + '/{0}/securityLabels',  # indicator value
        },
        'security_labels': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': True,
            'uri': '/v2/securityLabels/{0}/indicators/' + indicator_uri  # security labels
        },
        'tag_add': {
            'http_method': 'POST',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/tags/{1}',  # indicator value, tag name
        },
        'tag_delete': {
            'http_method': 'DELETE',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}/tags/{1}',  # indicator value, tag name
        },
        'tags': {
            'http_method': 'GET',
            'owner_allowed': True,
            'pagination': True,
            'uri': '/v2/tags/{0}/indicators/' + indicator_uri,  # tag name
        },
        'tags_load': {
            'http_method': 'GET',
            'owner_allowed': False,
            'pagination': True,
            'uri': base_uri + '/{0}/tags',  # indicator value
        },
        'update': {
            'http_method': 'PUT',
            'owner_allowed': True,
            'pagination': False,
            'uri': base_uri + '/{0}',  # indicator value
        },
        'victims': {
            'http_method': 'GET',
            'owner_allowed': False,
            'pagination': True,
            'uri': '/v2/victims/{0}/indicators/' + indicator_uri  # victim id
        },
    }
    if indicator_uri == 'files':
        # File indicators additionally expose file-occurrence endpoints.
        properties['file_occurrence'] = {
            'http_method': 'GET',
            'uri': '/v2/indicators/files/{0}/fileOccurrences/{1}',  # hash, occurrence id
            'owner_allowed': True,
            'pagination': False
        }
        properties['file_occurrence_add'] = {
            'http_method': 'POST',
            'uri': '/v2/indicators/files/{0}/fileOccurrences',  # hash
            'owner_allowed': True,
            'pagination': False,
        }
        properties['file_occurrence_delete'] = {
            'http_method': 'DELETE',
            'uri': '/v2/indicators/files/{0}/fileOccurrences/{1}',  # hash, occurrence id
            'owner_allowed': True,
            'pagination': False,
        }
        properties['file_occurrence_update'] = {
            'http_method': 'PUT',
            'uri': '/v2/indicators/files/{0}/fileOccurrences/{1}',  # hash, occurrence id
            'owner_allowed': True,
            'pagination': False,
        }
        properties['file_occurrences'] = {
            'http_method': 'GET',
            'uri': '/v2/indicators/files/{0}/fileOccurrences',  # hash
            'owner_allowed': True,
            'pagination': False,
        }
    if indicator_uri == 'hosts':
        # Host indicators additionally expose DNS resolution lookups.
        properties['dns_resolution'] = {
            'http_method': 'GET',
            'uri': '/v2/indicators/hosts/{0}/dnsResolutions',  # indicator value
            'owner_allowed': True,
            'pagination': True,
        }
    return properties
#
# groups
#
# Endpoint property map for the type-generic /v2/groups resource.
# Each action maps to {'http_method', 'owner_allowed', 'pagination', 'uri'};
# 'filters' lists supported filter method names.  ('owner_allowed' and
# 'pagination' are consumed elsewhere -- presumably request building and
# result paging; confirm in the resource layer.)
groups_properties = {
    'base': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/groups',
    },
    'groups': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': True,
        'uri': '/v2/groups/{0}/{1}/groups',  # group type, group value
    },
    'filters': [
        'add_adversary_id',
        'add_document_id',
        'add_email_id',
        'add_incident_id',
        'add_indicator',
        'add_security_label',
        'add_signature_id',
        'add_threat_id',
        'add_tag',
        'add_victim_id',
        # post filters
        'add_pf_name',
        'add_pf_date_added',
        'add_pf_type'
    ],
    'indicators': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/indicators/{0}/{1}/groups',  # indicator type, indicator value
    },
    'tags': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/tags/{0}/groups',  # tag name
    },
    'security_labels': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/securityLabels/{0}/groups',  # security labels
    },
    'victims': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': True,
        'uri': '/v2/victims/{0}/groups',  # victim id
    },
}
#
# indicators
#
# Endpoint property map for the type-generic /v2/indicators resource.
# Same schema as groups_properties; 'bulk' exposes the JSON bulk download.
indicators_properties = {
    'base': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/indicators',
    },
    'bulk': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/indicators/bulk/json',
    },
    'groups': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': True,
        'uri': '/v2/groups/{0}/{1}/indicators',  # group type, group value
    },
    # NOTE(review): unlike the per-type list in i_properties, this filter
    # list omits 'add_document_id' -- confirm that is intentional.
    'filters': [
        'add_adversary_id',
        'add_email_id',
        'add_incident_id',
        'add_indicator',
        'add_security_label',
        'add_signature_id',
        'add_tag',
        'add_threat_id',
        'add_victim_id',
        # post filters
        'add_pf_attribute',
        'add_pf_confidence',
        'add_pf_date_added',
        'add_pf_last_modified',
        'add_pf_rating',
        'add_pf_tag',
        'add_pf_threat_assess_confidence',
        'add_pf_threat_assess_rating',
        'add_pf_type'],
    'indicator': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/indicators/{0}/{1}',  # indicator type, indicator value
    },
    'tags': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/tags/{0}/indicators',  # tag name
    },
    'security_labels': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/securityLabels/{0}/indicators',  # security labels
    },
    'victims': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': True,
        'uri': '/v2/victims/{0}/indicators',  # victim id
    },
}
#
# owners
#
# Endpoint property map for the /v2/owners resource.
# Same schema as groups_properties.
owners_properties = {
    'base': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': False,
        'uri': '/v2/owners',
    },
    'filters': [
        'add_indicator',
        # post filters
        'add_pf_name',
        'add_pf_type',
    ],
    'indicators': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': False,
        'uri': '/v2/indicators/{0}/{1}/owners',  # indicator type, indicator value
    },
}
#
# victims
#
# Endpoint property map for the /v2/victims resource, including victim
# assets and group/indicator associations.  Same schema as groups_properties.
victims_properties = {
    'add': {
        'http_method': 'POST',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims',
    },
    'assets': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}/victimAssets',  # victim id
    },
    'asset_add': {
        'http_method': 'POST',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}/victimAssets/{1}',  # victim id, asset type
    },
    'asset_delete': {
        'http_method': 'DELETE',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}/victimAssets/{1}/{2}',  # victim id, asset type, asset id
    },
    'asset_update': {
        'http_method': 'PUT',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}/victimAssets/{1}/{2}',  # victim id, asset type, asset id
    },
    'association_groups': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/victims/{0}/groups',  # victim id
    },
    'association_group_add': {
        'http_method': 'POST',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}/groups/{1}/{2}',  # victim id, group type, group id
    },
    'association_group_delete': {
        'http_method': 'DELETE',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}/groups/{1}/{2}',  # victim id, group type, group id
    },
    'association_indicators': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/victims/{0}/indicators',  # victim id
    },
    'base': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': True,
        'uri': '/v2/victims',
    },
    'delete': {
        'http_method': 'DELETE',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}',  # victim id
    },
    'groups': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': True,
        'uri': '/v2/groups/{0}/{1}/victims',  # group type, group id
    },
    'filters': [
        'add_adversary_id',
        'add_document_id',
        'add_email_id',
        'add_id',
        'add_incident_id',
        'add_indicator',
        'add_signature_id',
        'add_threat_id',
    ],
    'id': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}',  # victim id
    },
    'indicators': {
        'http_method': 'GET',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/indicators/{0}/{1}/victims',  # indicator type, indicator value
    },
    'update': {
        'http_method': 'PUT',
        'owner_allowed': True,
        'pagination': False,
        'uri': '/v2/victims/{0}',  # victim id
    },
}
#
# batch jobs
#
# Endpoint property map for the /v2/batch resource (bulk upload jobs).
# Same schema as groups_properties.
batch_job_properties = {
    'add': {
        'http_method': 'POST',
        'owner_allowed': False,
        'pagination': False,
        'uri': '/v2/batch',
    },
    'id': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': False,
        'uri': '/v2/batch/{0}',  # batch id
    },
    'batch_error_download': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': False,
        'uri': '/v2/batch/{0}/errors',  # batch id
    },
    'batch_job_upload': {
        'http_method': 'POST',
        'owner_allowed': False,
        'pagination': False,
        'uri': '/v2/batch/{0}',  # batch id
    },
    'filters': [
        'add_id'
    ]
}
#
# attributes
#
# Endpoint property map for attribute security labels.  The '{0}'
# placeholder at the start of each uri is filled with a full parent
# resource path (group or indicator), presumably by the caller -- the
# formatting site is not in this module.
attribute_properties = {
    'load_security_labels': {
        'http_method': 'GET',
        'owner_allowed': False,
        'pagination': False,
        'uri': '{0}/attributes/{1}/securityLabels'  # parent uri, attribute id
    },
    'delete_security_label': {
        'http_method': 'DELETE',
        'owner_allowed': False,
        'pagination': False,
        'uri': '{0}/attributes/{1}/securityLabels/{2}'  # parent uri, attribute id, label
    },
    'add_security_label': {
        'http_method': 'POST',
        'owner_allowed': False,
        'pagination': False,
        'uri': '{0}/attributes/{1}/securityLabels/{2}'  # parent uri, attribute id, label
    },
}
# Master registry: resource-type constant -> its endpoint property map
# (built by g_properties()/i_properties() for group/indicator types),
# the singular JSON resource key and the plural URI segment.
api_properties = {
    'ADDRESSES': {
        'properties': i_properties('addresses'),
        'resource_key': 'address',
        'uri_attribute': 'addresses',
    },
    'ADVERSARIES': {
        'properties': g_properties('adversaries'),
        'resource_key': 'adversary',
        'uri_attribute': 'adversaries',
    },
    'DOCUMENTS': {
        'properties': g_properties('documents'),
        'resource_key': 'document',
        'uri_attribute': 'documents',
    },
    'EMAIL_ADDRESSES': {
        'properties': i_properties('emailAddresses'),
        'resource_key': 'emailAddress',
        'uri_attribute': 'emailAddresses',
    },
    'EMAILS': {
        'properties': g_properties('emails'),
        'resource_key': 'email',
        'uri_attribute': 'emails',
    },
    'FILES': {
        'properties': i_properties('files'),
        'resource_key': 'file',
        'uri_attribute': 'files',
    },
    'GROUPS': {
        'properties': groups_properties,
        'resource_key': 'group',
        'uri_attribute': 'groups',
    },
    'HOSTS': {
        'properties': i_properties('hosts'),
        'resource_key': 'host',
        'uri_attribute': 'hosts',
    },
    'INCIDENTS': {
        'properties': g_properties('incidents'),
        'resource_key': 'incident',
        'uri_attribute': 'incidents',
    },
    'INDICATORS': {
        'properties': indicators_properties,
        'resource_key': 'indicator',
        'uri_attribute': 'indicators',
    },
    'OWNERS': {
        'properties': owners_properties,
        'resource_key': 'owner',
        'uri_attribute': 'owners',
    },
    # Security labels and tags are not yet wired up as first-class
    # resources; the entries below are kept for reference.
    # 'SECURITY_LABELS': {
    #     'properties': 'security_labels_properties',
    #     'resource_key': 'securityLabel',
    #     'uri_attribute': 'securityLabels',
    # },
    # 'TAGS': {
    #     'properties': 'tags_properties',
    #     'resource_key': 'tag',
    #     'uri_attribute': 'tags',
    # },
    'SIGNATURES': {
        'properties': g_properties('signatures'),
        'resource_key': 'signature',
        'uri_attribute': 'signatures',
    },
    'THREATS': {
        'properties': g_properties('threats'),
        'resource_key': 'threat',
        'uri_attribute': 'threats',
    },
    'URLS': {
        'properties': i_properties('urls'),
        'resource_key': 'url',
        'uri_attribute': 'urls',
    },
    'VICTIMS': {
        'properties': victims_properties,
        'resource_key': 'victim',
        'uri_attribute': 'victims',
    },
    'BATCH_JOBS': {
        'properties': batch_job_properties,
        'resource_key': 'batchJob',
        'uri_attribute': 'batchJobs'
    },
    'ATTRIBUTES': {
        'properties': attribute_properties,
        'resource_key': 'attribute',
        'uri_attribute': 'attributes'
    }
}
| {
"repo_name": "percipient/threatconnect-python",
"path": "threatconnect/ApiProperties.py",
"copies": "1",
"size": "26867",
"license": "apache-2.0",
"hash": -7127784903395945000,
"line_mean": 30.0242494226,
"line_max": 118,
"alpha_frac": 0.4667435888,
"autogenerated": false,
"ratio": 3.6598556055033376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46265991943033374,
"avg_score": null,
"num_lines": null
} |
"""API properties.
"""
from __future__ import print_function
from __future__ import unicode_literals
from tabulate import tabulate, tabulate_formats, simple_separated_format
from platform import python_version_tuple
from common import skip
try:
if python_version_tuple() >= ("3", "3", "0"):
from inspect import signature, _empty
else:
signature = None
_empty = None
except ImportError:
signature = None
_empty = None
def test_tabulate_formats():
    "API: tabulate_formats is a list of strings"
    fmts = tabulate_formats
    print("tabulate_formats = %r" % fmts)
    assert type(fmts) is list
    assert all(type(fmt) is type("") for fmt in fmts)  # noqa
def _check_signature(function, expected_sig):
    """Assert that *function*'s parameters match (name, default) pairs."""
    if not signature:
        skip("")
    actual = signature(function)
    print("expected: %s\nactual: %s\n" % (expected_sig, str(actual)))
    pairs = zip(expected_sig, actual.parameters.items())
    for (exp_name, exp_default), (act_name, act_param) in pairs:
        assert exp_name == act_name
        assert exp_default == act_param.default
def test_tabulate_signature():
    "API: tabulate() type signature is unchanged"
    assert type(tabulate) is type(lambda: None)  # noqa
    expected = [
        ("tabular_data", _empty),
        ("headers", ()),
        ("tablefmt", "simple"),
        ("floatfmt", "g"),
        ("numalign", "default"),
        ("stralign", "default"),
        ("missingval", ""),
    ]
    _check_signature(tabulate, expected)
def test_simple_separated_format_signature():
    "API: simple_separated_format() type signature is unchanged"
    assert type(simple_separated_format) is type(lambda: None)  # noqa
    _check_signature(simple_separated_format, [("separator", _empty)])
| {
"repo_name": "kawamon/hue",
"path": "desktop/core/ext-py/tabulate-0.8.9/test/test_api.py",
"copies": "2",
"size": "1825",
"license": "apache-2.0",
"hash": 5212414391985718000,
"line_mean": 28.4166666667,
"line_max": 77,
"alpha_frac": 0.6131506849,
"autogenerated": false,
"ratio": 3.8179916317991633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5431142316699163,
"avg_score": null,
"num_lines": null
} |
""" api.py: Contains all API-paths. """
#TODO: Make API client-agnostic
import json

from flask import abort, redirect, request
from flask_login import login_required, login_user, logout_user
from passlib.hash import sha256_crypt

from flaskpost import app, login_manager, db
from flaskpost.additional import ssl_required, ConfigSingleton
from flaskpost.model import Blogpost, Metadata, User
""" Flask-Login needs this to load users properly. """
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
@app.route("/api/more_posts", methods=["POST"])
def api_more_posts():
current_limit = request.get_json()["current_limit"] + 1
result = []
posts = Blogpost.query.all()
filtered_posts = [post for post in posts if post.id >= current_limit and post.id <= current_limit + 10]
for post in filtered_posts:
result.append((post.id, post.title, post.post, post.date))
return json.JSONEncoder().encode(result)
""" Checks credentials, then logs in user if correct. """
@app.route("/api/login", methods=["POST"])
@ssl_required
def api_login():
username = request.form["username"]
password = request.form["password"]
user = User.query.filter_by(username=username).first()
if sha256_crypt.verify(password, user.password):
login_user(user)
return redirect("/")
else:
return abort(401)
""" Logs out the currently logged in user. """
@app.route("/api/logout")
@login_required
def api_logout():
logout_user()
return redirect("/")
""" Saves settings to database metadata table, then redirects to main. """
@app.route("/api/setup", methods=["POST"])
@ssl_required
def api_setup():
blog_title = request.form["blog_title"]
admin_username = request.form["admin_username"]
admin_password = request.form["admin_password"]
db.session.add(Metadata("blog_title", blog_title))
db.session.add(Metadata("needs_setup", False))
db.session.add(User(admin_username, admin_password))
db.session.commit()
config = ConfigSingleton()
config.update(blog_title, False)
return redirect("/")
""" Inserts a post into the database, then redirects to the blog front page.
Blogpost data is retrieved through a web form (served at /post), with a
timestamp generated inside the function. """
@app.route("/api/post", methods=["POST"])
@login_required
def api_post():
title = request.form["title"]
postbody = request.form["postbody"]
db.session.add(Blogpost(title, postbody))
db.session.commit()
return redirect("/")
@app.route("/api/admin", methods=["POST"])
@login_required
def api_admin():
title = request.form["blog_title"]
config = ConfigSingleton()
config.update(title, False)
return redirect("/")
| {
"repo_name": "oscarnyl/flaskpost",
"path": "flaskpost/api.py",
"copies": "1",
"size": "2760",
"license": "mit",
"hash": 6504305291468561000,
"line_mean": 30.3636363636,
"line_max": 107,
"alpha_frac": 0.6873188406,
"autogenerated": false,
"ratio": 3.6702127659574466,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4857531606557447,
"avg_score": null,
"num_lines": null
} |
"""api.py: Implemente rfc5766-turn-server HTTP server """
from django.http import HttpResponse
from django.utils import simplejson
import time
import hmac
from hashlib import sha1 as hash_alg
import base64
from turnapi import settings
import redis
import logging
# REST API or dynamic Long term credentials: pick the key-derivation
# implementation matching the configured TURN authentication mode.
if settings.TURN_AUTH:
    from rfc5766_turn_server_auth import calc_key
else:
    from rfc5766_turn_server import calc_key
from shared_key_updater import Start
logger = logging.getLogger( 'turnapi.api' )
# Build the Redis connection used for credential storage.  All three
# branches must bind the same name: later code (the `data` dict handed to
# the shared-key updater and the turn() view) reads ``redis_con``.  The
# original bound ``redis_server`` in two of the branches, which raised a
# NameError at import time for those configurations.
if settings.TURN_REDIS_URL is not None:
    # Full connection URL takes precedence.
    redis_con = redis.StrictRedis.from_url( settings.TURN_REDIS_URL )
elif settings.TURN_REDIS_UNIX_DOMAIN_SOCKET_PATH is None:
    # TCP connection from discrete host/port settings.
    redis_con = redis.StrictRedis(
        host = settings.TURN_REDIS_HOST,
        port = settings.TURN_REDIS_PORT,
        db = settings.TURN_REDIS_DB,
        password = settings.TURN_REDIS_PASSWORD
    )
else:
    # Unix domain socket connection.
    redis_con = redis.StrictRedis(
        unix_socket_path = settings.TURN_REDIS_UNIX_DOMAIN_SOCKET_PATH,
        db = settings.TURN_REDIS_DB,
        password = settings.TURN_REDIS_PASSWORD,
    )
realm = settings.TURN_REALM
# TODO: move this configuration elsewhere (original author's note).
shared_secret = settings.TURN_SHARED_SECRET
# NOTE(review): a literal '0' suffix is appended to the configured secret;
# the shared_key_updater presumably rotates this suffix -- confirm before
# changing.
shared_secret += '0'
# State handed to the shared-key updater.
data = {
    'shared secret' : shared_secret,
    'redis connection' : redis_con,
}
Start( data )
def turn( req ):
    """Django view issuing ephemeral TURN credentials (TURN REST API memo).

    Reads ``service``, ``username`` and ``ttl`` from the query string and
    returns a JSON object with ``username``, ``password``, ``ttl`` and
    ``uris``.
    """
    query = req.GET
    service = query.get( 'service', None )
    username = query.get( 'username', None )
    # BUG FIX: query-string values arrive as *strings*; the original added
    # the raw value to int(time.time()) below, raising TypeError whenever a
    # client supplied ?ttl=...  Coerce to int first (the settings default
    # passes through int() unchanged when already numeric).
    ttl = int( query.get( 'ttl', settings.TURN_KEY_TTL ) )
    # username: the TURN username to use, which is a colon-delimited
    # combination of the expiration timestamp and the username parameter
    # from the request (if specified)
    timestamp = str( int( time.time() ) + ttl )
    username = str( timestamp + settings.TURN_SEPARATOR + username if username else timestamp )
    # Derive the long-term credential key for this ephemeral username.
    shared_secret = data['shared secret']
    temp_pass = calc_key( username, realm, shared_secret )
    if not settings.TURN_AUTH:
        # Dynamic long-term credential mode: store the derived key in Redis
        # with a timeout and return the shared secret as the plain password.
        pKey = 'turn/user/%s/key' % ( username )
        kto = settings.TURN_CREDENTIAS_TIMEOUT  # seconds
        redis_con.setex( pKey, kto, temp_pass )
        temp_pass = shared_secret
    items = {
        "username" : username,
        "password" : temp_pass,
        "ttl" : ttl,
        "uris" : settings.TURN_API_URLS
    }
    logger.debug( 'Response: ' + str(items) )
    # Send JSON response
    items = simplejson.dumps( items )
    response = HttpResponse( items, content_type = 'application/json; charset=utf8' )
    return response
| {
"repo_name": "pmarques/django-turnapi",
"path": "turnapi/api.py",
"copies": "1",
"size": "2611",
"license": "mit",
"hash": -2059403035577988600,
"line_mean": 25.11,
"line_max": 93,
"alpha_frac": 0.690157028,
"autogenerated": false,
"ratio": 3.3821243523316062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9448581074328497,
"avg_score": 0.024740061200621888,
"num_lines": 100
} |
# api.py - library user interface
"""Main interface for the library user."""
from . import backend
from . import models
from . import oauth2
from . import tools
from . import urls
__all__ = ['Sheets']
# TODO: get worksheet
class Sheets(object):
    """Collection of spreadsheets available from given OAuth 2.0 credentials
    or API key.
    """

    @classmethod
    @tools.doctemplate(oauth2.SECRETS, oauth2.STORAGE, oauth2.SCOPES)
    def from_files(cls, secrets=None, storage=None, scopes=None, *,
                   no_webserver=False):
        """Return a spreadsheet collection making OAuth 2.0 credentials.

        Args:
            secrets (str): location of secrets file (default: ``%r``)
            storage (str): location of storage file (default: ``%r``)
            scopes: scope URL(s) or ``'read'`` or ``'write'`` (default: ``%r``)
            no_webserver (bool): URL/code prompt instead of webbrowser auth

        Returns:
            Sheets: new Sheets instance with OAuth 2.0 credentials
        """
        creds = oauth2.get_credentials(scopes, secrets, storage,
                                       no_webserver=no_webserver)
        return cls(creds)

    @classmethod
    def from_developer_key(cls, developer_key):
        """Return a spreadsheet collection using an API key.

        Args:
            developer_key (str): Google API key authorized for Drive and Sheets APIs

        Returns:
            Sheets: new Sheets instance using the specified key
        """
        return cls(credentials=None, developer_key=developer_key)

    def __init__(self, credentials=None, developer_key=None):
        """To access private data, you must provide OAuth2 credentials with
        access to the resource.

        To access public data, you may provide either an API key or
        OAuth2 credentials.

        Args:
            credentials (google.oauth2.credentials.Credentials):
                OAuth 2.0 credentials
            developer_key (str): Google API key authorized for Drive
                and Sheets APIs

        Raises:
            ValueError: If both ``credentials`` and ``developer_key`` are ``None``.
        """
        if credentials is None and developer_key is None:
            raise ValueError('need credentials or developer_key')
        self._creds = credentials
        self._developer_key = developer_key

    @tools.lazyproperty
    def _sheets(self):
        """Google sheets API service endpoint (v4)."""
        return backend.build_service('sheets', credentials=self._creds,
                                     developerKey=self._developer_key)

    @tools.lazyproperty
    def _drive(self):
        """Google drive API service endpoint (v3)."""
        return backend.build_service('drive', credentials=self._creds,
                                     developerKey=self._developer_key)

    def __len__(self):
        """Return the number of available spreadsheets.

        Returns:
            int: number of spreadsheets
        """
        return sum(1 for _ in backend.iterfiles(self._drive))

    def __iter__(self):
        """Fetch and yield all available spreadsheets.

        Yields:
            new SpreadSheet spreadsheet instances
        """
        return (self[id] for id, _ in backend.iterfiles(self._drive))

    def __contains__(self, id):
        """Return if there is a spreadsheet with the given id.

        Args:
            id (str): unique alphanumeric id of the spreadsheet

        Returns:
            bool: ``True`` if it can be fetched else ``False``
        """
        # EAFP: attempting the fetch is the existence check.
        try:
            backend.spreadsheet(self._sheets, id)
        except KeyError:
            return False
        else:
            return True

    def __getitem__(self, id):
        """Fetch and return the spreadsheet with the given id.

        Args:
            id (str): unique alphanumeric id of the spreadsheet

        Returns:
            SpreadSheet: new SpreadSheet instance

        Raises:
            KeyError: if no spreadsheet with the given ``id`` is found
        """
        # ``sheets[:]`` is supported and returns all spreadsheets as a list.
        if id == slice(None, None):
            return list(self)
        response = backend.spreadsheet(self._sheets, id)
        result = models.SpreadSheet._from_response(response, self._sheets)
        result._api = self
        return result

    def get(self, id_or_url, default=None):
        """Fetch and return the spreadsheet with the given id or url.

        Args:
            id_or_url (str): unique alphanumeric id or URL of the spreadsheet

        Returns:
            New SpreadSheet instance or given default if none is found

        Raises:
            ValueError: if an URL is given from which no id could be extracted
        """
        # Any '/' marks the argument as a URL rather than a bare id.
        if '/' in id_or_url:
            id = urls.SheetUrl.from_string(id_or_url).id
        else:
            id = id_or_url
        try:
            return self[id]
        except KeyError:
            return default

    def find(self, title):
        """Fetch and return the first spreadsheet with the given title.

        Args:
            title (str): title/name of the spreadsheet to return

        Returns:
            SpreadSheet: new SpreadSheet instance

        Raises:
            KeyError: if no spreadsheet with the given ``title`` is found
        """
        files = backend.iterfiles(self._drive, name=title)
        try:
            return next(self[id] for id, _ in files)
        except StopIteration:
            raise KeyError(title)

    def findall(self, title=None):
        """Fetch and return a list of spreadsheets with the given title.

        Args:
            title (str): title/name of the spreadsheets to return, or ``None`` for all

        Returns:
            list: list of new SpreadSheet instances (possibly empty)
        """
        if title is None:
            return list(self)
        files = backend.iterfiles(self._drive, name=title)
        return [self[id] for id, _ in files]

    def iterfiles(self):
        """Yield ``(id, title)`` pairs for all available spreadsheets.

        Yields:
            pairs of unique id (``str``) and title/name (``str``)
        """
        return backend.iterfiles(self._drive)

    def ids(self):
        """Return a list of all available spreadsheet ids.

        Returns:
            list: list of unique alphanumeric id strings
        """
        return [id for id, _ in self.iterfiles()]

    def titles(self, unique=False):
        """Return a list of all available spreadsheet titles.

        Args:
            unique (bool): drop duplicates

        Returns:
            list: list of title/name strings
        """
        if unique:
            return tools.uniqued(title for _, title in self.iterfiles())
        return [title for _, title in self.iterfiles()]
| {
"repo_name": "xflr6/gsheets",
"path": "gsheets/api.py",
"copies": "1",
"size": "6774",
"license": "mit",
"hash": 1476651048067758300,
"line_mean": 32.043902439,
"line_max": 85,
"alpha_frac": 0.5823737821,
"autogenerated": false,
"ratio": 4.54936198791135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.563173577001135,
"avg_score": null,
"num_lines": null
} |
# api.py
# -*- coding: utf-8 -*-
import getpass
import os
import pandas as pd
import requests
from .utils import *
gh_api = 'https://api.github.com/'
class Admin:
    """Administrator credentials and transport settings for the GitHub
    Classroom management functions.  The token itself is kept in a
    name-mangled attribute and never rendered by __str__."""

    def __init__(self, ghid=None, token_file=None, token=None,
                 protocol=None, proturl=None):
        self.ghid = ghid
        self.token_file = token_file
        self.__token = token
        self.protocol = protocol
        self.proturl = proturl

    def __str__(self):
        masked = '[ Hidden ]' if self.__token is not None else None
        lines = (
            '\nGitHub ID: {}\n'.format(self.ghid),
            'Token path: {}\n'.format(self.token_file),
            'Token: {}\n'.format(masked),
        )
        return ''.join(lines)
class Org:
    """A GitHub organization: its repos, members and teams, each keyed by
    name in a dict (non-dict arguments are coerced to empty dicts)."""

    def __init__(self, name=None, repos=None, members=None, teams=None):
        self.name = name
        self.repos = repos if isinstance(repos, dict) else {}
        self.members = members if isinstance(members, dict) else {}
        self.teams = teams if isinstance(teams, dict) else {}

    def __str__(self):
        parts = ['\nOrg Name: {0}\n'.format(self.name)]
        if self.repos:
            parts.append('\nRepos:\n')
            for _, repo in sorted(self.repos.items()):
                parts.append(str(Repository(name=repo.name,
                                            repo_id=repo.repo_id)))
        if self.members:
            parts.append('\nMembers:\n')
            for _, member in sorted(self.members.items()):
                parts.append(str(Member(ghid=member.ghid, role=member.role)))
        if self.teams:
            parts.append('\nTeams:\n')
            for _, team in sorted(self.teams.items()):
                parts.append(str(Team(name=team.name, team_id=team.team_id,
                                      repos=team.repos,
                                      members=team.members)))
        return ''.join(parts)
class Repository:
    """Name / numeric-id pair describing one GitHub repository."""

    def __init__(self, name=None, repo_id=None):
        self.name = name
        self.repo_id = repo_id

    def __str__(self):
        return '\nRepo Name: %s; Repo ID: %s\n' % (self.name, self.repo_id)
class Member:
    """A GitHub organization member, optionally carrying an org role."""

    def __init__(self, ghid=None, role=None):
        self.ghid = ghid
        self.role = role

    def __str__(self):
        # The role suffix is only rendered when a role is set.
        if not self.role:
            return '%s\n' % (self.ghid,)
        return '%s [%s]\n' % (self.ghid, self.role)
class Team:
    """A GitHub organization team with its repos and members (dicts keyed
    by name; non-dict arguments are coerced to empty dicts)."""

    def __init__(self, team_id=None, name=None, repos=None, members=None):
        self.team_id = team_id
        self.name = name
        self.repos = repos if isinstance(repos, dict) else {}
        self.members = members if isinstance(members, dict) else {}

    def __str__(self):
        pieces = ['\nTeam Name: {}; Team ID: {}\n'.format(self.name,
                                                          self.team_id)]
        for _, repo in sorted(self.repos.items()):
            pieces.append(str(Repository(name=repo.name,
                                         repo_id=repo.repo_id)))
        for _, member in sorted(self.members.items()):
            pieces.append(str(Member(ghid=member.ghid, role=member.role)))
        return ''.join(pieces)
class Roster:
    """Course roster: the source CSV path plus Student objects keyed by
    'last_first' (populated by RemoteGit.buildRoster)."""

    def __init__(self, path=None):
        self.path = path
        self.students = {}

    def __str__(self):
        header = '\nPath: {0}\n'.format(self.path) + 'Students: '
        body = ''.join(
            str(Student(first_name=s.first_name, last_name=s.last_name,
                        ghid=s.ghid))
            for _, s in sorted(self.students.items()))
        return header + body
class Student:
    """One student's name and GitHub id."""

    def __init__(self, first_name=None, last_name=None, ghid=None):
        self.first_name = first_name
        self.last_name = last_name
        self.ghid = ghid

    def __str__(self):
        return '%s %s [%s]; ' % (self.first_name, self.last_name, self.ghid)
class RemoteGit:
'''
Remote git management operations
'''
    def __init__(self):
        # Populated later by setAPICreds / setOrg / buildRoster.
        self.admin = None
        self.org = None
        self.roster = None
def __str__(self):
text = '\nAdmin:\n {}\n'.format(self.admin)
text += '\nOrganization:\n {}\n'.format(self.org)
text += '\nRoster:\n {}\n'.format(self.roster)
return text
# ------------------------
# Admin functions
# ------------------------
def setLogin(self, ghid = None, **kwargs):
if not ghid:
return input('Please enter your GitHub id: ').strip()
else:
return ghid
    def setToken(self, tokenfile = None, **kwargs):
        """Return (path, token) read from a token file; when no file is
        given, prompt on stdin until a readable path is supplied."""
        if tokenfile is not None:
            tfp = os.path.expanduser(tokenfile)
            with open(tfp) as f:
                return tfp, f.read().strip()
        while True:
            tfp = input('Please enter path to token file: ').strip()
            tfp = os.path.expanduser(tfp)
            if os.path.isfile(tfp):
                with open(tfp) as f:
                    return tfp, f.read().strip()
            else:
                errorMessage('Not a proper file!')
                continue
def setProtocol(self, protocol = None, **kwargs):
if not protocol:
mess = 'Do you use HTTPS or SSH to interact with GitHub via the CLI?'
choice = pickOpt(mess, ['HTTPS [default]', 'SSH'])
protocol = 'HTTPS' if choice == 0 else 'SSH'
proturl = 'git@github.com:' if protocol == 'SSH' else 'https://github.com/'
return protocol, proturl
def setAPICreds(self, **kwargs):
admin = Admin()
admin.ghid = self.setLogin(**kwargs)
admin.token_file, admin._Admin__token = self.setToken(**kwargs)
admin.protocol, admin.proturl = self.setProtocol(**kwargs)
self.admin = admin
def setOrg(self, **kwargs):
org = Org(**kwargs)
if not org.name:
mess = 'Please enter your organization name: '
org.name = input(mess)
self.org = org
# ------------------------
# Roster functions
# ------------------------
def readRosterCSV(self, rosterpath):
rpath = os.path.expanduser(rosterpath)
df = pd.read_csv(rpath)
return df, rpath
def buildRoster(self, rosterfile = None, **kwargs):
rost = Roster(**kwargs)
if rosterfile:
roster, rpath = self.readRosterCSV(rosterfile)
else:
while True:
mess = 'Please enter path to roster CSV file: '
rosterfile = os.path.expanduser(input(mess).strip())
if os.path.isfile(rosterfile):
roster, rpath = self.readRosterCSV(rosterfile)
break
else:
errorMessage('Not a proper file!')
continue
rost.path = rpath
# allow for different column names in roster file
icols = list(roster.columns.values)
ocols = {'first_name': 'first name',
'last_name': 'last name',
'ghid': 'GitHub ID'}
miss = list(set(['first_name','last_name','ghid']) - set(icols))
corr = {}
for k,v in ocols.items():
if k in miss:
mess = 'Which roster column is the student\'s ' + v + '?'
choice = pickOpt(mess, icols)
corr[k] = choice
else:
corr[k] = roster.columns.get_loc(k)
for index, row in roster.iterrows():
student = Student(first_name = row[corr['first_name']],
last_name = row[corr['last_name']],
ghid = row[corr['ghid']])
key = student.last_name + '_' + student.first_name
rost.students[key] = student
self.roster = rost
# option to rename roster names to match GitRoom requirements
if len(miss) > 0:
p = 'Do you want rename you roster file columns with GitRoom names?'
choice = pickOpt(p, ['Yes', 'No'])
if choice == 0:
old = [icols[corr['last_name']],
icols[corr['first_name']],
icols[corr['ghid']]]
roster.rename(columns = {old[0]:'last_name',
old[1]:'first_name',
old[2]:'ghid'},
inplace = True)
roster.to_csv(rpath, index = False)
# ------------------------
# Requests API
# ------------------------
def getGR(self, url, **kwargs):
auth = (self.admin.ghid, self.admin._Admin__token)
resp = requests.get(url, auth = auth, **kwargs)
return resp.json()
def postGR(self, url, **kwargs):
auth = (self.admin.ghid, self.admin._Admin__token)
resp = requests.post(url, auth = auth, **kwargs)
return resp
def putGR(self, url, **kwargs):
auth = (self.admin.ghid, self.admin._Admin__token)
resp = requests.put(url, auth = auth, **kwargs)
return resp
# ------------------------
# Org functions: get
# ------------------------
def getMembers(self):
url = gh_api + 'orgs/' + self.org.name + '/members'
resp = self.getGR(url, params = {'role':'admin'})
ad_members = {}
for r in resp:
ghid = r['login']
member = Member(ghid = ghid, role = 'admin')
ad_members[ghid] = member
resp = self.getGR(url, params = {'role':'member'})
members = {}
for r in resp:
ghid = r['login']
member = Member(ghid = ghid, role = 'member')
members[ghid] = member
members.update(ad_members)
self.org.members = members
def getTeams(self):
url = gh_api + 'orgs/' + self.org.name + '/teams'
resp = self.getGR(url)
teams = {}
for r in resp:
team = Team(team_id = r['id'], name = r['name'])
url = gh_api + 'teams/' + str(team.team_id) + '/members'
resp = self.getGR(url)
members = {}
for r in resp:
ghid = r['login']
member = Member(ghid = ghid)
members[ghid] = member
team.members = members
url = gh_api + 'teams/' + str(team.team_id) + '/repos'
resp = self.getGR(url)
repos = {}
for r in resp:
repo = Repository(name = r['name'], repo_id = r['id'])
repos[r['name']] = repo
team.repos = repos
teams[team.name] = team
self.org.teams = teams
def getRepos(self):
url = gh_api + 'orgs/' + self.org.name + '/repos'
resp = self.getGR(url)
repos = {}
for r in resp:
repo = Repository(name = r['name'], repo_id = r['id'])
repos[r['name']] = repo
self.org.repos = repos
# ------------------------
# Org functions: post/put
# ------------------------
def createRemoteRepo(self, repo_name, private = False):
url = gh_api + 'orgs/' + self.org.name + '/repos'
json = {'name': repo_name}
if private == True:
json['private'] = 'true'
return self.postGR(url, json = json)
def addMember(self, member, role = 'member'):
mid = str(self.roster.students[member].ghid)
url = gh_api + 'orgs/' + self.org.name + '/memberships/' + mid
params = {'role': role}
return self.putGR(url, params = params)
def addAdmin(self, github_id):
url = gh_api + 'orgs/' + self.org.name + '/memberships/' + github_id
params = {'role': 'admin'}
return self.putGR(url, params = params)
def createTeam(self, team_name, permission = 'push'):
url = gh_api + 'orgs/' + self.org.name + '/teams'
json = {'name': team_name, 'permission': permission}
return self.postGR(url, json = json)
def addMemberToTeam(self, team_name, member, role = 'member'):
mid = str(self.roster.students[member].ghid)
tid = str(self.org.teams[team_name].team_id)
url = gh_api + 'teams/' + tid + '/memberships/' + mid
params = {'role': role}
return self.putGR(url, params = params)
def addRepoToTeam(self, team_name, repo_name):
tid = str(self.org.teams[team_name].team_id)
url = gh_api + 'teams/' + tid + '/repos/' + self.org.name
url += '/' + repo_name
params = {'permission': 'push'}
return self.putGR(url, params = params)
| {
"repo_name": "btskinner/grm",
"path": "grm/api.py",
"copies": "1",
"size": "12908",
"license": "mit",
"hash": -7293024985385688000,
"line_mean": 31.5138539043,
"line_max": 83,
"alpha_frac": 0.4973659746,
"autogenerated": false,
"ratio": 3.837098692033294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48344646666332936,
"avg_score": null,
"num_lines": null
} |
import requests
import os
import sys
import appdirs
import pkg_resources
import json
from .api_exceptions import (AudioFileError,
AudioFileFormatError)
from .utils import (abstract_http_method,
perform_voice_output,
get_audio_file_mime)
# GET /
def get_kalliope_version(base_uri, username, password):
    """Fetch Kalliope's version from the API root endpoint."""
    def request():
        return requests.get(base_uri + "/", auth=(username, password))
    return abstract_http_method(request)
# GET /synapses
def get_synapses(base_uri, username, password):
    """List every available synapse with its details."""
    def request():
        return requests.get(base_uri + "/synapses",
                            auth=(username, password))
    return abstract_http_method(request)
# GET /synapses/<synapse_name>
def get_synapse(base_uri, username, password, synapse_name):
    """Fetch the details of a single synapse by name."""
    url = base_uri + "/synapses" + "/" + synapse_name
    def request():
        return requests.get(url, auth=(username, password))
    return abstract_http_method(request)
# GET /mute
def get_listening_status(base_uri, username, password):
    """Ask whether Kalliope is ready to listen for vocal orders."""
    def request():
        return requests.get(base_uri + "/mute",
                            auth=(username, password))
    return abstract_http_method(request)
# POST /synapses/start/id/<synapse_name>
def execute_by_name(base_uri, username, password, synapse_name, voice):
    """Trigger the synapse with the given name, optionally muting speech."""
    url = base_uri + "/synapses/start/id" + "/" + synapse_name
    payload = {'no_voice': perform_voice_output(voice)}
    def request():
        return requests.post(url, json=payload,
                             auth=(username, password))
    return abstract_http_method(request)
# POST /synapses/start/order
def execute_by_order(base_uri, username, password, order, voice):
    """Execute an order supplied as text, optionally muting speech."""
    payload = {'order': order, 'no_voice': perform_voice_output(voice)}
    def request():
        return requests.post(base_uri + "/synapses/start/order",
                             json=payload,
                             auth=(username, password))
    return abstract_http_method(request)
# POST /synapses/start/audio
# Supported file types: WAV, MP3
def execute_by_audio(base_uri, username, password, audio_file, voice):
    """ Execute the specified order by audio.

    :param audio_file: path to a WAV or MP3 file holding the spoken order
    :raises AudioFileError: if the file cannot be opened or read
    :raises AudioFileFormatError: if the file's mime type is unsupported
    """
    try:
        mime_of_file = get_audio_file_mime(audio_file)
        # The audio file will be sent in binary mode, along with the mime type.
        # BUG FIX: the original referenced the undefined name ``args``
        # (``args.audio_file``), which raised NameError before any request was
        # made (and NameError is not caught below); use the ``audio_file``
        # parameter instead.
        # NOTE(review): the handle is left open for requests/GC to release
        # after the upload completes, matching the original behavior.
        files = {'file': (audio_file, open(audio_file, 'rb'),
                          mime_of_file, {'Expires': '0'})}
        payload = {'no_voice': perform_voice_output(voice)}
        return abstract_http_method(lambda:
                requests.post(base_uri + "/synapses/start/audio",
                              files=files,
                              data=payload,
                              auth=(username, password)))
    except (IOError, FileNotFoundError):
        raise AudioFileError
    except AudioFileFormatError:
        raise
'''
# POST /mute
def set_mute_status(args):
payload = {'mute': mute}
return abstract_http_method(lambda:
requests.post(base_uri + "/mute",
json=payload,
auth=(username, password)))
'''
# This module only provides API helper functions; running it directly is a
# deliberate no-op (the CLI entry point lives elsewhere in the package).
if __name__ == '__main__':
    pass
| {
"repo_name": "frnmst/kalliope-rest",
"path": "kalliope_rest/api.py",
"copies": "1",
"size": "4559",
"license": "mit",
"hash": 6882366179813051000,
"line_mean": 35.1825396825,
"line_max": 81,
"alpha_frac": 0.6317174819,
"autogenerated": false,
"ratio": 4.066904549509367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5198622031409367,
"avg_score": null,
"num_lines": null
} |
# api.py
from flask import Flask, request
from flask_restful import Resource, Api, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
from config import BaseConfig
from deepdiff import DeepDiff
import base64
# Application, REST API, and database singletons shared by the resources below.
app = Flask(__name__)
app.config.from_object(BaseConfig)
api = Api(app)
db = SQLAlchemy(app)

# Imported after ``app``/``db`` exist so ManageDocument and models can use
# them (circular-import workaround).
from ManageDocument import *
from models import *
# Handles all requests (POST/PUT) for the left endpoint.
class LeftAPI(Resource):
    """REST resource that stores the 'left' document of a diff pair."""

    def _store(self, id_doc):
        # POST and PUT behave identically (the original duplicated this body
        # verbatim): read the base64 payload from the form, persist it under
        # ``id_doc`` on the 'left' side via AnalyseData, and echo the stored
        # data together with the resulting HTTP status.
        json64 = request.form['data']
        status = AnalyseData(id_doc, json64, 'left')
        return {'id': id_doc, 'json64': json64, 'source': 'left'}, status

    # Handles post requests
    def post(self, id_doc):
        return self._store(id_doc)

    # Handles put requests
    def put(self, id_doc):
        return self._store(id_doc)
class RightAPI(Resource):
    """REST resource that stores the 'right' document of a diff pair."""

    def _store(self, id_doc):
        # POST and PUT behave identically (the original duplicated this body
        # verbatim): read the base64 payload from the form, persist it under
        # ``id_doc`` on the 'right' side via AnalyseData, and echo the stored
        # data together with the resulting HTTP status.
        json64 = request.form['data']
        status = AnalyseData(id_doc, json64, 'right')
        return {'id': id_doc, 'json64': json64, 'source': 'right'}, status

    # Handle post requests
    def post(self, id_doc):
        return self._store(id_doc)

    # Handles put requests
    def put(self, id_doc):
        return self._store(id_doc)
# Handles the request to return the result of the diff.
class ResultAPI(Resource):
    """REST resource returning the diff between the stored left/right docs."""

    def get(self,id_doc):
        # Fetch both previously-uploaded sides of the document pair.
        left = GetDocumentBySource(id_doc,'left')
        right = GetDocumentBySource(id_doc,'right')
        #Check if exists data to compare JSON
        if left and right:
            leftDecoded = base64.b64decode(left.json64)
            rightDecoded = base64.b64decode(right.json64)
            # NOTE(review): the decoded payloads are raw bytes, not parsed
            # JSON, so DeepDiff compares two byte strings rather than JSON
            # structures — confirm whether a json.loads step was intended.
            diff = DeepDiff(leftDecoded, rightDecoded)
            if diff:
                result = diff
            else:
                result = {'message': 'Json are equal'}
        else:
            result = {'message': 'There is no enough data'}
        return result
# Register the resources: /v1/diff/<id>/left and /v1/diff/<id>/right accept
# document uploads (POST/PUT); /v1/diff/<id> returns the computed diff (GET).
api.add_resource(LeftAPI, '/v1/diff/<int:id_doc>/left', endpoint = 'left')
api.add_resource(RightAPI, '/v1/diff/<int:id_doc>/right', endpoint = 'right')
api.add_resource(ResultAPI, '/v1/diff/<int:id_doc>', endpoint = 'result')
# Run the development server when this script is executed directly.
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') | {
"repo_name": "IAPOLINARIO/python-diff",
"path": "app/api.py",
"copies": "1",
"size": "2522",
"license": "mit",
"hash": -3859035844772961000,
"line_mean": 32.64,
"line_max": 77,
"alpha_frac": 0.6149881047,
"autogenerated": false,
"ratio": 3.671033478893741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4786021583593741,
"avg_score": null,
"num_lines": null
} |
# api.py
import web
from libweasyl.models.meta import Base
from libweasyl.models.api import APIToken
from libweasyl import security
from weasyl import define as d
def is_api_user():
    """Report whether the current request carries API credentials.

    True when either the Weasyl API key header or an Authorization header
    is present in the request environment.
    """
    env = web.ctx.env
    return 'HTTP_X_WEASYL_API_KEY' in env or 'HTTP_AUTHORIZATION' in env
def get_api_keys(userid):
    """Return (token, description) rows for every API token owned by userid."""
    query = "SELECT token, description FROM api_tokens WHERE userid = %s"
    with Base.dbsession().connection() as db:
        return db.execute(query, userid)
def add_api_key(userid, description):
    """Create and persist a fresh 48-character API token for userid."""
    session = Base.dbsession()
    new_token = APIToken(userid=userid,
                         token=security.generate_key(48),
                         description=description)
    session.add(new_token)
    session.flush()
def delete_api_keys(userid, keys):
    """Delete the given API tokens belonging to userid.

    A no-op when ``keys`` is empty.
    """
    if not keys:
        return
    query = APIToken.query.filter(APIToken.userid == userid)
    query = query.filter(APIToken.token.in_(keys))
    query.delete(synchronize_session='fetch')
def tidy_media(item):
    """Convert one media row into {'url', 'mediaid'}, plus nested 'links'.

    The display URL is absolutified; any 'described' sub-media are tidied
    recursively via tidy_all_media.
    """
    ret = {
        'mediaid': item.get('mediaid'),
        'url': d.absolutify_url(item['display_url']),
    }
    described = item.get('described')
    if described:
        ret['links'] = tidy_all_media(described)
    return ret
def tidy_all_media(d):
    """Tidy every media list in mapping *d*, hiding legacy thumbnails.

    Note: the parameter ``d`` shadows the module-level ``d`` import inside
    this function; only tidy_media uses the module.
    """
    # We suppress thumbnail-legacy currently.
    hidden_keys = ['thumbnail-legacy']
    ret = {}
    for key, items in d.iteritems():
        if key not in hidden_keys:
            ret[key] = map(tidy_media, items)
    # Prefer a custom thumbnail, then a generated one, as the canonical entry.
    thumbnail_value = ret.get('thumbnail-custom') or ret.get('thumbnail-generated')
    if thumbnail_value:
        ret['thumbnail'] = thumbnail_value
    return ret
| {
"repo_name": "dzamie/weasyl",
"path": "weasyl/api.py",
"copies": "1",
"size": "1454",
"license": "apache-2.0",
"hash": -3302818784132411400,
"line_mean": 26.9615384615,
"line_max": 96,
"alpha_frac": 0.6581843191,
"autogenerated": false,
"ratio": 3.3579676674364896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45161519865364896,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.