content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import unified_planning as up
import unified_planning.engines as engines
from unified_planning.plans import Plan
from unified_planning.model import ProblemKind
from unified_planning.exceptions import UPUsageError
from unified_planning.engines.results import LogLevel, PlanGenerationResultStatus, Result, ValidationResult, PlanGenerationResult
from typing import IO, Callable, Dict, List, Optional, Tuple, Type, cast
from fractions import Fraction
from multiprocessing import Process, Queue
class Parallel(engines.engine.Engine,
               engines.mixins.OneshotPlannerMixin,
               engines.mixins.PlanValidatorMixin):
    """Create a parallel instance of multiple Engines.

    Each configured engine runs the requested operation in its own OS
    process; the first definitive answer wins and the remaining worker
    processes are terminated.
    """
    def __init__(self, engines: List[Tuple[Type[engines.engine.Engine], Dict[str, str]]]):
        # engines: list of (engine class, constructor-options dict) pairs.
        # Each pair is instantiated inside a worker process on demand.
        self.engines = engines
    @property
    def name(self) -> str:
        """Name of this meta-engine."""
        return 'Parallel'
    @staticmethod
    def supports(problem_kind: 'ProblemKind') -> bool:
        # The supported features depend on the actual engines; support is
        # checked per-engine inside _solve/_validate, so claim True here.
        return True
    def _run_parallel(self, fname, *args) -> List[Result]:
        """Run method ``fname(*args)`` on every engine, each in its own process.

        Blocks until either every process reported a result, or one process
        reported a definitive result (``Result.is_definitive_result``). In the
        latter case every other process is terminated and only the definitive
        result is returned.

        Re-raises any exception forwarded by a worker process (see ``_run``).
        """
        signaling_queue: Queue = Queue()
        processes = []
        for idx, (engine_class, opts) in enumerate(self.engines):
            options = opts
            _p = Process(name=str(idx),
                         target=_run,
                         args=(idx, engine_class, options,
                               signaling_queue, fname, *args))
            processes.append(_p)
            _p.start()
        processes_alive = len(processes)
        results: List[Result] = []
        definitive_result_found: bool = False
        while True:
            if processes_alive == 0: # Every planner gave a result
                break
            (idx, res) = signaling_queue.get(block=True)
            processes_alive -= 1
            if isinstance(res, BaseException):
                # A worker forwarded its exception; propagate in the parent.
                raise res
            else:
                assert isinstance(res, Result)
                # If the planner is sure about the result (optimality of the result or impossibility of the problem or the problem does not need optimality) exit the loop
                if res.is_definitive_result(*args):
                    definitive_result_found = True
                    break
                else:
                    results.append(res)
        # Stop every worker that is still running; their results are no
        # longer needed (or they are blocked producing one).
        for p in processes:
            p.terminate()
        if definitive_result_found: # A planner found a definitive result
            return [res]
        return results
    def _solve(self, problem: 'up.model.AbstractProblem',
               callback: Optional[Callable[['up.engines.results.PlanGenerationResult'], None]] = None,
               timeout: Optional[float] = None,
               output_stream: Optional[IO[str]] = None) -> 'up.engines.results.PlanGenerationResult':
        """Solve ``problem`` with all engines in parallel, keep the best result.

        ``callback`` and ``output_stream`` are not supported across process
        boundaries; a warning is emitted and they are ignored.
        """
        for engine, _ in self.engines:
            assert issubclass(engine, engines.mixins.OneshotPlannerMixin)
            if not engine.supports(problem.kind):
                raise UPUsageError('Parallel engines cannot solve this kind of problem!')
        if callback is not None:
            warnings.warn('Parallel engines do not support the callback system.', UserWarning)
        if output_stream is not None:
            warnings.warn('Parallel engines do not support the output stream system.', UserWarning)
        # NOTE(review): the two None placeholders stand in for the engines'
        # solve() callback/output_stream parameters — confirm the positional
        # order matches OneshotPlannerMixin.solve in this version.
        final_reports = self._run_parallel('solve', problem, None, timeout, None)
        # List containing the result statuses in the order we prefer them.
        result_order: List[PlanGenerationResultStatus] = [
            PlanGenerationResultStatus.SOLVED_OPTIMALLY,
            PlanGenerationResultStatus.UNSOLVABLE_PROVEN,
            PlanGenerationResultStatus.SOLVED_SATISFICING,
            PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
            PlanGenerationResultStatus.TIMEOUT,
            PlanGenerationResultStatus.MEMOUT,
            PlanGenerationResultStatus.INTERNAL_ERROR,
            PlanGenerationResultStatus.UNSUPPORTED_PROBLEM]
        final_result: Optional[PlanGenerationResult] = None
        result_found: bool = False
        # Pick the first report whose status ranks best in result_order.
        for ro in result_order:
            if result_found:
                break
            for r in final_reports:
                pgr = cast(PlanGenerationResult, r)
                if pgr.status == ro:
                    result_found = True
                    final_result = pgr
                    break
        # Keep every engine's report in the log for debugging purposes.
        logs = [up.engines.LogMessage(LogLevel.INFO, str(fr)) for fr in final_reports]
        # if no results are given by the planner, we create a default one
        if final_result is None:
            return up.engines.PlanGenerationResult(PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
                                                   None, self.name, log_messages=logs)
        new_plan = problem.normalize_plan(final_result.plan) if final_result.plan is not None else None
        if final_result.log_messages is not None:
            logs = final_result.log_messages + logs
        return up.engines.results.PlanGenerationResult(
            final_result.status,
            new_plan,
            final_result.engine_name,
            final_result.metrics,
            logs
        )
    def _validate(self, problem: 'up.model.AbstractProblem',
                  plan: Plan) -> 'up.engines.results.ValidationResult':
        """Validate ``plan`` against ``problem``; first validator's answer wins."""
        for engine, _ in self.engines:
            assert issubclass(engine, engines.mixins.PlanValidatorMixin)
            if not engine.supports(problem.kind):
                raise UPUsageError('Parallel engines cannot validate this kind of problem!')
        return cast(ValidationResult, self._run_parallel('validate', problem, plan)[0])
def _run(idx: int, EngineClass: type, options: Dict[str, str], signaling_queue: Queue, fname: str, *args):
with EngineClass(**options) as s:
try:
local_res = getattr(s, fname)(*args)
except Exception as ex:
signaling_queue.put((idx, ex))
return
signaling_queue.put((idx, local_res))
| unified_planning/engines/parallel.py | 6,688 | Create a parallel instance of multiple Engines.
Copyright 2021 AIPlan4EU project Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The supported features depends on its actual engines Every planner gave a result If the planner is sure about the result (optimality of the result or impossibility of the problem or the problem does not need optimality) exit the loop A planner found a definitive result List containing the results in the order we prefer them if no results are given by the planner, we create a default one | 994 | en | 0.86482 |
# coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.5
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HardwareConnectorResponse(object):
    """Swagger model: a response carrying a page of hardware connectors.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to json key in definition.
    """
    swagger_types = {
        'pagination_info': 'PaginationInfo',
        'items': 'list[HardwareConnector]'
    }

    attribute_map = {
        'pagination_info': 'pagination_info',
        'items': 'items'
    }

    def __init__(self, pagination_info=None, items=None):
        """HardwareConnectorResponse - a model defined in Swagger.

        Both fields are optional; assignment goes through the property
        setters so future validation hooks apply.
        """
        self._pagination_info = None
        self._items = None
        if pagination_info is not None:
            self.pagination_info = pagination_info
        if items is not None:
            self.items = items

    @property
    def pagination_info(self):
        """Pagination information, only available in GET requests.

        :return: The pagination_info of this HardwareConnectorResponse.
        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """Set the pagination_info of this HardwareConnectorResponse.

        :param pagination_info: The pagination_info of this HardwareConnectorResponse.
        :type: PaginationInfo
        """
        self._pagination_info = pagination_info

    @property
    def items(self):
        """A list of hardware connectors.

        :return: The items of this HardwareConnectorResponse.
        :rtype: list[HardwareConnector]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the items of this HardwareConnectorResponse.

        :param items: The items of this HardwareConnectorResponse.
        :type: list[HardwareConnector]
        """
        self._items = items

    def to_dict(self):
        """Return the model properties as a dict.

        Nested model objects (anything exposing ``to_dict``) found directly
        in attributes, in list elements or in dict values are converted too.
        """
        snapshot = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                snapshot[name] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                snapshot[name] = value.to_dict()
            elif isinstance(value, dict):
                snapshot[name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                snapshot[name] = value
        return snapshot

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        return (
            isinstance(other, HardwareConnectorResponse)
            and self.__dict__ == other.__dict__
        )

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self.__eq__(other)
| purity_fb/purity_fb_1dot5/models/hardware_connector_response.py | 4,266 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
HardwareConnectorResponse - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the items of this HardwareConnectorResponse.
a list of hardware connectors
:return: The items of this HardwareConnectorResponse.
:rtype: list[HardwareConnector]
Sets the items of this HardwareConnectorResponse.
a list of hardware connectors
:param items: The items of this HardwareConnectorResponse.
:type: list[HardwareConnector]
Gets the pagination_info of this HardwareConnectorResponse.
pagination information, only available in GET requests
:return: The pagination_info of this HardwareConnectorResponse.
:rtype: PaginationInfo
Sets the pagination_info of this HardwareConnectorResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this HardwareConnectorResponse.
:type: PaginationInfo
Returns the model properties as a dict
Returns the string representation of the model
Purity//FB REST Client
Client for Purity//FB REST API (1.0), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.5
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 | 1,467 | en | 0.686063 |
#####################
# IMPORT DEPENDENCIES
######################
# flask (server)
from flask import(
Flask,
render_template,
jsonify,
request,
redirect)
#######################
# FLASK SET-UP
#######################
# Create the WSGI application object; __name__ lets Flask locate the
# templates/ and static/ folders relative to this module.
app = Flask(__name__)
#######################
# FLASK ROUTES
#######################
@app.route("/")
def index():
    """Serve the landing page."""
    landing_page = "index.html"
    return render_template(landing_page)
# @app.route("/outcomes")
# def charts():
#     return render_template("outcomes.html")

# Run the Flask development server when executed directly.
# NOTE: debug mode enables the reloader and interactive debugger;
# never enable it in production.
if __name__ == "__main__":
    app.run(debug=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <wunsch.felix@googlemail.com>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
import numpy as np
class qa_dqpsk_soft_demapper_cc (gr_unittest.TestCase):
    """QA test for the DQPSK soft demapper block (Python 2 / GNU Radio)."""
    def setUp (self):
        # Fresh flowgraph for each test case.
        self.tb = gr.top_block ()
    def tearDown (self):
        self.tb = None
    def test_001_t (self):
        # set up fg
        pi=np.pi
        # Input: a sequence of phases, mapped onto the unit circle and
        # rotated by pi/4 (QPSK constellation offset).
        data_in = [0, pi/2, pi, -pi/2, pi/2, -pi/2, -pi/2, 0, 0, pi, pi/2, pi/2]
        data_in = [np.exp(1j*i) for i in data_in]
        data_in = [i*np.exp(1j*pi/4) for i in data_in]
        self.src = blocks.vector_source_c(data_in)
        # framelen=6: the differential reference resets every 6 symbols.
        self.dqpsk = ieee802_15_4.dqpsk_soft_demapper_cc(framelen=6)
        self.snk = blocks.vector_sink_c(1)
        self.tb.connect(self.src, self.dqpsk, self.snk)
        self.tb.run ()
        # check data
        data_out = self.snk.data()
        # NOTE(review): ref differs from data_in at a few positions —
        # presumably the expected differentially-decoded phases; confirm
        # against the demapper's specification.
        ref = [0, pi/2, pi, -pi/2, pi/2, pi, -pi/2, 0, 0, pi, pi, pi/2]
        ref = np.array([np.exp(1j*i) for i in ref])
        print "angle in:", np.angle(data_in)/pi*180
        print "angle out:", np.angle(data_out)/pi*180
        print "angle ref:", np.angle(ref)/pi*180
        self.assertFloatTuplesAlmostEqual(ref, data_out, 5)
# Run the QA suite under GNU Radio's unittest wrapper when invoked directly.
if __name__ == '__main__':
    gr_unittest.run(qa_dqpsk_soft_demapper_cc)
| gr-ieee802-15-4/python/qa_dqpsk_soft_demapper_cc.py | 2,147 | !/usr/bin/env python -*- coding: utf-8 -*- Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <wunsch.felix@googlemail.com>. This is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this software; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Boston, MA 02110-1301, USA. set up fg check data | 875 | en | 0.857501 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
from snowflake.connector.util_text import construct_hostname
def test_construct_hostname_basic():
    """construct_hostname builds <account>.<region>.snowflakecomputing.com.

    An explicit region wins over (and strips) a region already embedded in
    the account name; an empty/None region keeps the account unchanged.
    """
    cases = [
        ("eu-central-1", "account1", "account1.eu-central-1.snowflakecomputing.com"),
        ("", "account1", "account1.snowflakecomputing.com"),
        (None, "account1", "account1.snowflakecomputing.com"),
        ("as-east-3", "account1", "account1.as-east-3.snowflakecomputing.com"),
        ("as-east-3", "account1.eu-central-1", "account1.as-east-3.snowflakecomputing.com"),
        ("", "account1.eu-central-1", "account1.eu-central-1.snowflakecomputing.com"),
        (None, "account1.eu-central-1", "account1.eu-central-1.snowflakecomputing.com"),
        (
            None,
            "account1-jkabfvdjisoa778wqfgeruishafeuw89q.global",
            "account1-jkabfvdjisoa778wqfgeruishafeuw89q.global.snowflakecomputing.com",
        ),
    ]
    for region, account, expected in cases:
        assert construct_hostname(region, account) == expected
| test/unit/test_construct_hostname.py | 1,253 | !/usr/bin/env python -*- coding: utf-8 -*- Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved. | 111 | en | 0.581805 |
#
# -*- coding: utf-8 -*-
#
import unittest
import os
import shutil
import yaml
import tensorflow as tf
from neural_compressor.experimental import model_conversion
# NOTE(review): eager execution is enabled globally before any Keras model
# is built — presumably required by the QAT flow below; confirm.
tf.compat.v1.enable_eager_execution()
from tensorflow import keras
from tensorflow.python.framework import graph_util
from neural_compressor.adaptor.tf_utils.util import disable_random
def build_fake_yaml():
    """Write ``fake_yaml.yaml``: a minimal config for the conversion tests.

    The config requests a QAT -> default (quantized) conversion of a
    TensorFlow model on CPU. The string is round-tripped through
    yaml.load/yaml.dump so the written file is normalized YAML.
    """
    fake_yaml = '''
        model:
          name: fake_yaml
          framework: tensorflow
          device: cpu
        model_conversion:
          source: qat
          destination: default
        '''
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # The context manager closes the file; the explicit close() the
    # original carried inside the with-block was redundant.
    with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
        yaml.dump(y, f)
def prepare_dataset():
    """Return the MNIST training split with pixels normalized to [0, 1].

    :return: tuple ``(train_images, train_labels)``; images are scaled
        from the original 0-255 byte range by dividing by 255.
    """
    # Load MNIST dataset
    mnist = keras.datasets.mnist
    # The test split is never returned, so don't keep (or normalize) it —
    # the original normalized test_images and then discarded them.
    (train_images, train_labels), _ = mnist.load_data()
    # Normalize the input images so that each pixel value is between 0 and 1.
    train_images = train_images / 255.0
    return train_images, train_labels
def prepare_model(model_out_path, train_images, train_labels):
    """Build, briefly train (1 epoch) and save a small MNIST classifier.

    :param model_out_path: SavedModel directory to write.
    :param train_images: normalized images of shape (N, 28, 28).
    :param train_labels: integer class labels.
    """
    # Define the model architecture.
    model = keras.Sequential([
      keras.layers.InputLayer(input_shape=(28, 28)),
      keras.layers.Reshape(target_shape=(28, 28, 1)),
      keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
      keras.layers.MaxPooling2D(pool_size=(2, 2)),
      keras.layers.Flatten(),
      keras.layers.Dense(10)
    ])
    # Train the digit classification model
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.fit(
      train_images,
      train_labels,
      epochs=1,
      validation_split=0.1,
    )
    model.save(model_out_path)
def prepare_qat_model(model_in_path, model_out_path, train_images, train_labels):
    """Wrap a trained model with fake-quant nodes (QAT) and fine-tune it.

    Loads the float model from ``model_in_path``, applies
    ``tfmot.quantization.keras.quantize_model``, fine-tunes on a small
    subset for one epoch and saves the result to ``model_out_path``.
    """
    import tensorflow_model_optimization as tfmot
    quantize_model = tfmot.quantization.keras.quantize_model
    # q_aware stands for quantization aware.
    model = tf.keras.models.load_model(model_in_path)
    q_aware_model = quantize_model(model)
    # `quantize_model` requires a recompile.
    q_aware_model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
    train_images_subset = train_images[0:1000] # out of 60000
    train_labels_subset = train_labels[0:1000]
    q_aware_model.fit(train_images_subset, train_labels_subset,
                      batch_size=500, epochs=1, validation_split=0.1)
    q_aware_model.save(model_out_path)
@unittest.skipIf(tf.version.VERSION < '2.4.0', "Only supports tf 2.4.0 or above")
class TestModelConversion(unittest.TestCase):
    """End-to-end check of QAT -> default (quantized) model conversion."""
    @classmethod
    def setUpClass(self):
        # Train a small float model, then derive a QAT model from it.
        self._baseline_temp_path = './temp_baseline'
        self._qat_temp_path = './temp_qat'
        self._quantized_temp_path = './temp_quantized'
        build_fake_yaml()
        train_images, train_labels = prepare_dataset()
        prepare_model(self._baseline_temp_path, train_images, train_labels)
        prepare_qat_model(self._baseline_temp_path, self._qat_temp_path, train_images, train_labels)
    @classmethod
    def tearDownClass(self):
        # Remove the config file and every temporary model directory.
        os.remove('fake_yaml.yaml')
        shutil.rmtree(self._qat_temp_path, ignore_errors=True)
        shutil.rmtree(self._baseline_temp_path, ignore_errors=True)
        shutil.rmtree(self._quantized_temp_path, ignore_errors=True)
    def test_model_conversion(self):
        """Conversion works configured via API, a Conf object, or a yaml path,
        and the resulting graph contains quantized op variants."""
        from neural_compressor.experimental import ModelConversion, common
        from neural_compressor.conf.config import Conf
        # 1) pure-API configuration
        conversion = ModelConversion()
        conversion.source = 'qat'
        conversion.destination = 'default'
        conversion.model = self._qat_temp_path
        q_model = conversion.fit()
        q_model.save(self._quantized_temp_path)
        # 2) pre-built Conf object
        conf = Conf('fake_yaml.yaml')
        conversion = ModelConversion(conf)
        conversion.source = 'qat'
        conversion.destination = 'default'
        conversion.model = self._qat_temp_path
        q_model = conversion.fit()
        # 3) yaml path passed directly
        conversion = ModelConversion('fake_yaml.yaml')
        conversion.source = 'qat'
        conversion.destination = 'default'
        conversion.model = self._qat_temp_path
        q_model = conversion.fit()
        # Reload the SavedModel written in step 1 and assert that the
        # compute ops were replaced by their quantized counterparts.
        graph = tf.compat.v1.Graph()
        with graph.as_default():
            with tf.compat.v1.Session() as sess:
                meta_graph=tf.compat.v1.saved_model.loader.load(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], self._quantized_temp_path)
                print(meta_graph.graph_def.node)
                for i in meta_graph.graph_def.node:
                    if 'MatMul' in i.op:
                        self.assertTrue('QuantizedMatMul' in i.op)
                    if 'MaxPool' in i.op:
                        self.assertTrue('QuantizedMaxPool' in i.op)
                    if 'Conv2D' in i.op:
                        self.assertTrue('QuantizedConv2D' in i.op)
# Execute the conversion test suite when run as a script.
if __name__ == "__main__":
    unittest.main()
| test/test_model_conversion.py | 5,228 | -*- coding: utf-8 -*- Load MNIST dataset Normalize the input image so that each pixel value is between 0 to 1. Define the model architecture. Train the digit classification model q_aware stands for for quantization aware. `quantize_model` requires a recompile. out of 60000 | 273 | en | 0.781932 |
"""Template helper methods for rendering strings with Home Assistant data."""
from __future__ import annotations
from ast import literal_eval
import asyncio
import base64
import collections.abc
from contextlib import suppress
from contextvars import ContextVar
from datetime import datetime, timedelta
from functools import partial, wraps
import json
import logging
import math
from operator import attrgetter
import random
import re
import sys
from typing import Any, Generator, Iterable, cast
from urllib.parse import urlencode as urllib_urlencode
import weakref
import jinja2
from jinja2 import contextfilter, contextfunction
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.utils import Namespace # type: ignore
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_METERS,
STATE_UNKNOWN,
)
from homeassistant.core import (
HomeAssistant,
State,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import entity_registry, location as loc_helper
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.thread import ThreadWithException
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
# Unique marker object (identity-compared, never equal to user data).
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
# hass.data keys: per-mode cached Jinja environments (see Template._env)
# and per-render bookkeeping.
_RENDER_INFO = "template.render_info"
_ENVIRONMENT = "template.environment"
_ENVIRONMENT_LIMITED = "template.environment_limited"
_ENVIRONMENT_STRICT = "template.environment_strict"
# Any of {{, {% or {# marks a string as a Jinja template.
_RE_JINJA_DELIMITERS = re.compile(r"\{%|\{\{|\{#")
# Match "simple" ints and floats. -1.0, 1, +5, 5.0
_IS_NUMERIC = re.compile(r"^[+-]?(?!0\d)\d*(?:\.\d*)?$")
_RESERVED_NAMES = {"contextfunction", "evalcontextfunction", "environmentfunction"}
_GROUP_DOMAIN_PREFIX = "group."
_COLLECTABLE_STATE_ATTRIBUTES = {
    "state",
    "attributes",
    "last_changed",
    "last_updated",
    "context",
    "domain",
    "object_id",
    "name",
}
# Default re-render throttles: broad (all-states) dependencies are rate
# limited harder than domain-scoped ones (see RenderInfo._freeze).
ALL_STATES_RATE_LIMIT = timedelta(minutes=1)
DOMAIN_STATES_RATE_LIMIT = timedelta(seconds=1)
# NOTE(review): context variable presumably tracking the template being
# rendered on the current task; usage is outside this chunk — confirm.
template_cv: ContextVar[str | None] = ContextVar("template_cv", default=None)
@bind_hass
def attach(hass: HomeAssistant, obj: Any) -> None:
    """Recursively attach hass to all template instances in list and dict."""
    if isinstance(obj, Template):
        obj.hass = hass
    elif isinstance(obj, list):
        for element in obj:
            attach(hass, element)
    elif isinstance(obj, collections.abc.Mapping):
        # Both keys and values may be templates.
        for key, value in obj.items():
            attach(hass, key)
            attach(hass, value)
def render_complex(
    value: Any, variables: TemplateVarsType = None, limited: bool = False
) -> Any:
    """Recursive template creator helper function.

    Renders every Template found in a (possibly nested) list/mapping
    structure; non-template leaves are returned unchanged.

    :param value: arbitrary structure of lists, mappings and Templates.
    :param variables: variables made available to the rendered templates.
    :param limited: if True, templates may not access any function or
        filter depending on hass or the state machine.
    """
    if isinstance(value, list):
        # Bug fix: propagate `limited` into recursive calls so nested
        # templates honor the limited environment as well.
        return [render_complex(item, variables, limited) for item in value]
    if isinstance(value, collections.abc.Mapping):
        return {
            render_complex(key, variables, limited): render_complex(
                item, variables, limited
            )
            for key, item in value.items()
        }
    if isinstance(value, Template):
        return value.async_render(variables, limited=limited)
    return value
def is_complex(value: Any) -> bool:
    """Test if data structure is a complex template."""
    if isinstance(value, Template):
        return True
    if isinstance(value, list):
        return any(is_complex(element) for element in value)
    if isinstance(value, collections.abc.Mapping):
        # A template may appear as a key or as a value.
        members = list(value.keys()) + list(value.values())
        return any(is_complex(member) for member in members)
    return False
def is_template_string(maybe_template: str) -> bool:
    """Check if the input is a Jinja2 template (contains {{, {% or {#)."""
    return re.search(r"\{%|\{\{|\{#", maybe_template) is not None
class ResultWrapper:
    """Result wrapper class to store render result.

    Marker base: subclasses carry the original rendered string alongside
    the parsed native value (see gen_result_wrapper and TupleWrapper).
    """
    # Original rendered string, or None when no render result is attached.
    render_result: str | None
def gen_result_wrapper(kls):
    """Generate a result wrapper."""

    class Wrapper(kls, ResultWrapper):
        """Wrapper of a kls that can store render_result."""

        def __init__(self, *args: tuple, render_result: str | None = None) -> None:
            super().__init__(*args)
            self.render_result = render_result

        def __str__(self) -> str:
            if self.render_result is not None:
                return self.render_result
            # Can't get set repr to work
            if kls is set:
                return str(set(self))
            return cast(str, kls.__str__(self))

    return Wrapper
class TupleWrapper(tuple, ResultWrapper):
    """Wrap a tuple."""
    # This is all magic to be allowed to subclass a tuple:
    # tuple is immutable, so the contents must be supplied in __new__,
    # while render_result is attached afterwards in __init__.
    def __new__(cls, value: tuple, *, render_result: str | None = None) -> TupleWrapper:
        """Create a new tuple class."""
        return super().__new__(cls, tuple(value))
    # pylint: disable=super-init-not-called
    def __init__(self, value: tuple, *, render_result: str | None = None):
        """Initialize a new tuple class."""
        self.render_result = render_result
    def __str__(self) -> str:
        """Return string representation."""
        # Fall back to plain tuple repr when no render result is stored.
        if self.render_result is None:
            return super().__str__()
        return self.render_result
# Map of builtin container types to their render-result-carrying wrappers
# (used by Template._parse_result).
RESULT_WRAPPERS: dict[type, type] = {
    kls: gen_result_wrapper(kls)  # type: ignore[no-untyped-call]
    for kls in (list, dict, set)
}
# tuple needs the hand-written wrapper above (immutable; see TupleWrapper).
RESULT_WRAPPERS[tuple] = TupleWrapper
# Constant predicates used as RenderInfo filter defaults (_true) and as
# the narrowed "matches nothing" filter after freezing (_false).
def _true(arg: Any) -> bool:
    return True
def _false(arg: Any) -> bool:
    return False
class RenderInfo:
    """Holds information about a template render.

    Collects which entities/domains a render touched so change listeners
    can be registered for exactly those dependencies, and derives a
    re-render rate limit from how broad the dependencies are.
    """
    def __init__(self, template):
        """Initialise with permissive filters; narrowed by _freeze()."""
        self.template = template
        # Will be set sensibly once frozen.
        self.filter_lifecycle = _true
        self.filter = _true
        self._result: str | None = None
        self.is_static = False
        self.exception: TemplateError | None = None
        self.all_states = False
        self.all_states_lifecycle = False
        self.domains = set()
        self.domains_lifecycle = set()
        self.entities = set()
        self.rate_limit: timedelta | None = None
        self.has_time = False
    def __repr__(self) -> str:
        """Representation of RenderInfo."""
        return f"<RenderInfo {self.template} all_states={self.all_states} all_states_lifecycle={self.all_states_lifecycle} domains={self.domains} domains_lifecycle={self.domains_lifecycle} entities={self.entities} rate_limit={self.rate_limit}> has_time={self.has_time}"
    def _filter_domains_and_entities(self, entity_id: str) -> bool:
        """Template should re-render if the entity state changes when we match specific domains or entities."""
        return (
            split_entity_id(entity_id)[0] in self.domains or entity_id in self.entities
        )
    def _filter_entities(self, entity_id: str) -> bool:
        """Template should re-render if the entity state changes when we match specific entities."""
        return entity_id in self.entities
    def _filter_lifecycle_domains(self, entity_id: str) -> bool:
        """Template should re-render if the entity is added or removed with domains watched."""
        return split_entity_id(entity_id)[0] in self.domains_lifecycle
    def result(self) -> str:
        """Results of the template computation.

        Re-raises any exception captured during the render.
        """
        if self.exception is not None:
            raise self.exception
        return cast(str, self._result)
    def _freeze_static(self) -> None:
        # Static templates depend on no state; no tracking needed.
        self.is_static = True
        self._freeze_sets()
        self.all_states = False
    def _freeze_sets(self) -> None:
        # Dependency sets become immutable once collection is over.
        self.entities = frozenset(self.entities)
        self.domains = frozenset(self.domains)
        self.domains_lifecycle = frozenset(self.domains_lifecycle)
    def _freeze(self) -> None:
        """Finalize: derive the rate limit and narrow the filters."""
        self._freeze_sets()
        if self.rate_limit is None:
            # Broad (all-states) dependencies are throttled harder than
            # domain-scoped ones; entity-only renders get no rate limit.
            if self.all_states or self.exception:
                self.rate_limit = ALL_STATES_RATE_LIMIT
            elif self.domains or self.domains_lifecycle:
                self.rate_limit = DOMAIN_STATES_RATE_LIMIT
        if self.exception:
            return
        # Narrow each filter to the smallest predicate that still covers
        # everything this render touched.
        if not self.all_states_lifecycle:
            if self.domains_lifecycle:
                self.filter_lifecycle = self._filter_lifecycle_domains
            else:
                self.filter_lifecycle = _false
        if self.all_states:
            return
        if self.domains:
            self.filter = self._filter_domains_and_entities
        elif self.entities:
            self.filter = self._filter_entities
        else:
            self.filter = _false
class Template:
"""Class to hold a template and manage caching and rendering."""
__slots__ = (
"__weakref__",
"template",
"hass",
"is_static",
"_compiled_code",
"_compiled",
"_exc_info",
"_limited",
"_strict",
)
    def __init__(self, template, hass=None):
        """Instantiate a template.

        :param template: the Jinja2 template source string.
        :param hass: optional HomeAssistant instance; required for anything
            that touches the state machine.
        :raises TypeError: if template is not a string.
        """
        if not isinstance(template, str):
            raise TypeError("Expected template to be a string")
        self.template: str = template.strip()
        self._compiled_code = None
        self._compiled: jinja2.Template | None = None
        self.hass = hass
        # A string with no Jinja delimiters renders to itself.
        self.is_static = not is_template_string(template)
        self._exc_info = None
        self._limited = None
        self._strict = None
    @property
    def _env(self) -> TemplateEnvironment:
        """Return the Jinja environment matching this template's mode.

        One environment per mode (default/limited/strict) is cached in
        hass.data; without hass a shared no-hass environment is used.
        """
        if self.hass is None:
            return _NO_HASS_ENV
        if self._limited:
            wanted_env = _ENVIRONMENT_LIMITED
        elif self._strict:
            wanted_env = _ENVIRONMENT_STRICT
        else:
            wanted_env = _ENVIRONMENT
        ret: TemplateEnvironment | None = self.hass.data.get(wanted_env)
        if ret is None:
            # First use of this mode: create and cache the environment.
            ret = self.hass.data[wanted_env] = TemplateEnvironment(self.hass, self._limited, self._strict)  # type: ignore[no-untyped-call]
        return ret
    def ensure_valid(self) -> None:
        """Compile the template, raising TemplateError if it is invalid.

        No-op for static templates or when already compiled.
        """
        if self.is_static or self._compiled_code is not None:
            return
        try:
            self._compiled_code = self._env.compile(self.template)  # type: ignore[no-untyped-call]
        except jinja2.TemplateError as err:
            raise TemplateError(err) from err
    def render(
        self,
        variables: TemplateVarsType = None,
        parse_result: bool = True,
        limited: bool = False,
        **kwargs: Any,
    ) -> Any:
        """Render given template (thread-safe wrapper around async_render).

        If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
        """
        if self.is_static:
            # Static templates never touch the event loop.
            if not parse_result or self.hass.config.legacy_templates:
                return self.template
            return self._parse_result(self.template)
        # Non-static render must happen on the event loop thread.
        return run_callback_threadsafe(
            self.hass.loop,
            partial(self.async_render, variables, parse_result, limited, **kwargs),
        ).result()
    @callback
    def async_render(
        self,
        variables: TemplateVarsType = None,
        parse_result: bool = True,
        limited: bool = False,
        strict: bool = False,
        **kwargs: Any,
    ) -> Any:
        """Render given template.

        This method must be run in the event loop.

        If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
        """
        if self.is_static:
            if not parse_result or self.hass.config.legacy_templates:
                return self.template
            return self._parse_result(self.template)
        # NOTE(review): _ensure_compiled presumably compiles and caches
        # the template for the requested mode — defined outside this chunk.
        compiled = self._compiled or self._ensure_compiled(limited, strict)
        if variables is not None:
            kwargs.update(variables)
        try:
            render_result = _render_with_context(self.template, compiled, **kwargs)
        except Exception as err:
            # Normalize any Jinja/runtime error into TemplateError.
            raise TemplateError(err) from err
        render_result = render_result.strip()
        if self.hass.config.legacy_templates or not parse_result:
            return render_result
        return self._parse_result(render_result)
def _parse_result(self, render_result: str) -> Any: # pylint: disable=no-self-use
"""Parse the result."""
try:
result = literal_eval(render_result)
if type(result) in RESULT_WRAPPERS:
result = RESULT_WRAPPERS[type(result)](
result, render_result=render_result
)
# If the literal_eval result is a string, use the original
# render, by not returning right here. The evaluation of strings
# resulting in strings impacts quotes, to avoid unexpected
# output; use the original render instead of the evaluated one.
# Complex and scientific values are also unexpected. Filter them out.
if (
# Filter out string and complex numbers
not isinstance(result, (str, complex))
and (
# Pass if not numeric and not a boolean
not isinstance(result, (int, float))
# Or it's a boolean (inherit from int)
or isinstance(result, bool)
# Or if it's a digit
or _IS_NUMERIC.match(render_result) is not None
)
):
return result
except (ValueError, TypeError, SyntaxError, MemoryError):
pass
return render_result
    async def async_render_will_timeout(
        self,
        timeout: float,
        variables: TemplateVarsType = None,
        strict: bool = False,
        **kwargs: Any,
    ) -> bool:
        """Check to see if rendering a template will timeout during render.

        This is intended to check for expensive templates
        that will make the system unstable. The template
        is rendered in the executor to ensure it does not
        tie up the event loop.

        This function is not a security control and is only
        intended to be used as a safety check when testing
        templates.

        This method must be run in the event loop.

        Returns True when rendering exceeded ``timeout`` seconds; raises
        TemplateError if the render itself failed.
        """
        if self.is_static:
            return False
        compiled = self._compiled or self._ensure_compiled(strict=strict)
        if variables is not None:
            kwargs.update(variables)
        # Exception info captured by the worker thread, if any.
        self._exc_info = None
        finish_event = asyncio.Event()
        # Runs on a helper thread; the event loop only awaits finish_event.
        def _render_template() -> None:
            try:
                _render_with_context(self.template, compiled, **kwargs)
            except TimeoutError:
                # Raised into the thread by raise_exc() below — expected.
                pass
            except Exception:  # pylint: disable=broad-except
                self._exc_info = sys.exc_info()
            finally:
                run_callback_threadsafe(self.hass.loop, finish_event.set)
        try:
            template_render_thread = ThreadWithException(target=_render_template)
            template_render_thread.start()
            await asyncio.wait_for(finish_event.wait(), timeout=timeout)
            if self._exc_info:
                # Re-raise the worker's failure with its original traceback.
                raise TemplateError(self._exc_info[1].with_traceback(self._exc_info[2]))
        except asyncio.TimeoutError:
            # Render took too long: inject TimeoutError into the worker thread
            # (swallowed above) so it stops, and report the timeout.
            template_render_thread.raise_exc(TimeoutError)
            return True
        finally:
            template_render_thread.join()
        return False
    @callback
    def async_render_to_info(
        self, variables: TemplateVarsType = None, strict: bool = False, **kwargs: Any
    ) -> RenderInfo:
        """Render the template and collect an entity filter."""
        # Nested render-to-info is not supported: only one RenderInfo may be
        # registered in hass.data at a time.
        assert self.hass and _RENDER_INFO not in self.hass.data
        render_info = RenderInfo(self)  # type: ignore[no-untyped-call]
        # pylint: disable=protected-access
        if self.is_static:
            render_info._result = self.template.strip()
            render_info._freeze_static()
            return render_info
        # Expose the collector so state/domain accessors record what they touch
        # during the render below.
        self.hass.data[_RENDER_INFO] = render_info
        try:
            render_info._result = self.async_render(variables, strict=strict, **kwargs)
        except TemplateError as ex:
            # Stored on the result rather than raised so callers get the info.
            render_info.exception = ex
        finally:
            # Always unregister the collector, even when rendering raised.
            del self.hass.data[_RENDER_INFO]
        render_info._freeze()
        return render_info
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
if self.is_static:
return self.template
return run_callback_threadsafe(
self.hass.loop,
self.async_render_with_possible_json_value,
value,
error_value,
).result()
@callback
def async_render_with_possible_json_value(
self, value, error_value=_SENTINEL, variables=None
):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self.is_static:
return self.template
if self._compiled is None:
self._ensure_compiled()
variables = dict(variables or {})
variables["value"] = value
with suppress(ValueError, TypeError):
variables["value_json"] = json.loads(value)
try:
return _render_with_context(
self.template, self._compiled, **variables
).strip()
except jinja2.TemplateError as ex:
if error_value is _SENTINEL:
_LOGGER.error(
"Error parsing value: %s (value: %s, template: %s)",
ex,
value,
self.template,
)
return value if error_value is _SENTINEL else error_value
    def _ensure_compiled(
        self, limited: bool = False, strict: bool = False
    ) -> jinja2.Template:
        """Bind a template to a specific hass instance."""
        self.ensure_valid()
        assert self.hass is not None, "hass variable not set on template"
        # A template instance may only ever be compiled in one mode; the
        # environment (and its globals/filters) is chosen per mode.
        assert (
            self._limited is None or self._limited == limited
        ), "can't change between limited and non limited template"
        assert (
            self._strict is None or self._strict == strict
        ), "can't change between strict and non strict template"
        assert not (strict and limited), "can't combine strict and limited template"
        self._limited = limited
        self._strict = strict
        env = self._env
        # Reuse the code object produced by ensure_valid() and bind it to the
        # environment for this hass instance.
        self._compiled = cast(
            jinja2.Template,
            jinja2.Template.from_code(env, self._compiled_code, env.globals, None),
        )
        return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (
self.__class__ == other.__class__
and self.template == other.template
and self.hass == other.hass
)
    def __hash__(self) -> int:
        """Hash code for template."""
        # Hash only on the template source; consistent with __eq__'s primary key.
        return hash(self.template)
def __repr__(self) -> str:
"""Representation of Template."""
return 'Template("' + self.template + '")'
class AllStates:
    """Class to expose all HA states as attributes."""

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize all states."""
        self._hass = hass

    def __getattr__(self, name):
        """Return the domain state."""
        if "." in name:
            # Full entity id lookup, e.g. states["light.kitchen"].
            return _get_state_if_valid(self._hass, name)
        if name in _RESERVED_NAMES:
            return None
        if valid_entity_id(f"{name}.entity"):
            return DomainStates(self._hass, name)
        raise TemplateError(f"Invalid domain name '{name}'")

    # Jinja will try __getitem__ first and it avoids the need
    # to call is_safe_attribute
    __getitem__ = __getattr__

    def _collect_all(self) -> None:
        # Record that the render read all states.
        if (render_info := self._hass.data.get(_RENDER_INFO)) is not None:
            render_info.all_states = True

    def _collect_all_lifecycle(self) -> None:
        # Record that the render read all-states lifecycle info (counts).
        if (render_info := self._hass.data.get(_RENDER_INFO)) is not None:
            render_info.all_states_lifecycle = True

    def __iter__(self):
        """Return all states."""
        self._collect_all()
        return _state_generator(self._hass, None)

    def __len__(self) -> int:
        """Return number of states."""
        self._collect_all_lifecycle()
        return self._hass.states.async_entity_ids_count()

    def __call__(self, entity_id):
        """Return the states."""
        found = _get_state(self._hass, entity_id)
        if found is None:
            return STATE_UNKNOWN
        return found.state

    def __repr__(self) -> str:
        """Representation of All States."""
        return "<template AllStates>"
class DomainStates:
    """Class to expose a specific HA domain as attributes."""

    def __init__(self, hass: HomeAssistant, domain: str) -> None:
        """Initialize the domain states."""
        self._hass = hass
        self._domain = domain

    def __getattr__(self, name):
        """Return the states."""
        return _get_state_if_valid(self._hass, f"{self._domain}.{name}")

    # Jinja will try __getitem__ first and it avoids the need
    # to call is_safe_attribute
    __getitem__ = __getattr__

    def _collect_domain(self) -> None:
        # Record that the render read this domain's states.
        if (entity_collect := self._hass.data.get(_RENDER_INFO)) is not None:
            entity_collect.domains.add(self._domain)

    def _collect_domain_lifecycle(self) -> None:
        # Record that the render read this domain's lifecycle info (counts).
        if (entity_collect := self._hass.data.get(_RENDER_INFO)) is not None:
            entity_collect.domains_lifecycle.add(self._domain)

    def __iter__(self):
        """Return the iteration over all the states."""
        self._collect_domain()
        return _state_generator(self._hass, self._domain)

    def __len__(self) -> int:
        """Return number of states."""
        self._collect_domain_lifecycle()
        return self._hass.states.async_entity_ids_count(self._domain)

    def __repr__(self) -> str:
        """Representation of Domain States."""
        return f"<template DomainStates('{self._domain}')>"
class TemplateState(State):
    """Class to represent a state object in a template."""
    __slots__ = ("_hass", "_state", "_collect")
    # Inheritance is done so functions that check against State keep working
    # pylint: disable=super-init-not-called
    def __init__(self, hass: HomeAssistant, state: State, collect: bool = True) -> None:
        """Initialize template state.

        collect=False disables per-entity RenderInfo tracking
        (see _state_generator, which iterates at domain/all granularity).
        """
        self._hass = hass
        self._state = state
        self._collect = collect
    def _collect_state(self) -> None:
        # Record this entity on the active RenderInfo, if one is registered.
        if self._collect and _RENDER_INFO in self._hass.data:
            self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
    # Jinja will try __getitem__ first and it avoids the need
    # to call is_safe_attribute
    def __getitem__(self, item):
        """Return a property as an attribute for jinja."""
        if item in _COLLECTABLE_STATE_ATTRIBUTES:
            # _collect_state inlined here for performance
            if self._collect and _RENDER_INFO in self._hass.data:
                self._hass.data[_RENDER_INFO].entities.add(self._state.entity_id)
            return getattr(self._state, item)
        if item == "entity_id":
            # entity_id access intentionally does not collect (see property).
            return self._state.entity_id
        if item == "state_with_unit":
            return self.state_with_unit
        raise KeyError
    @property
    def entity_id(self):
        """Wrap State.entity_id.

        Intentionally does not collect state
        """
        return self._state.entity_id
    @property
    def state(self):
        """Wrap State.state."""
        self._collect_state()
        return self._state.state
    @property
    def attributes(self):
        """Wrap State.attributes."""
        self._collect_state()
        return self._state.attributes
    @property
    def last_changed(self):
        """Wrap State.last_changed."""
        self._collect_state()
        return self._state.last_changed
    @property
    def last_updated(self):
        """Wrap State.last_updated."""
        self._collect_state()
        return self._state.last_updated
    @property
    def context(self):
        """Wrap State.context."""
        self._collect_state()
        return self._state.context
    @property
    def domain(self):
        """Wrap State.domain."""
        self._collect_state()
        return self._state.domain
    @property
    def object_id(self):
        """Wrap State.object_id."""
        self._collect_state()
        return self._state.object_id
    @property
    def name(self):
        """Wrap State.name."""
        self._collect_state()
        return self._state.name
    @property
    def state_with_unit(self) -> str:
        """Return the state concatenated with the unit if available."""
        self._collect_state()
        unit = self._state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        return f"{self._state.state} {unit}" if unit else self._state.state
    def __eq__(self, other: Any) -> bool:
        """Ensure we collect on equality check."""
        self._collect_state()
        return self._state.__eq__(other)
    def __repr__(self) -> str:
        """Representation of Template State."""
        return f"<template TemplateState({self._state.__repr__()})>"
def _collect_state(hass: HomeAssistant, entity_id: str) -> None:
    """Record entity_id on the active RenderInfo, if a render is collecting."""
    entity_collect = hass.data.get(_RENDER_INFO)
    if entity_collect is None:
        return
    entity_collect.entities.add(entity_id)
def _state_generator(hass: HomeAssistant, domain: str | None) -> Generator:
    """State generator for a domain or all states."""
    ordered = sorted(hass.states.async_all(domain), key=attrgetter("entity_id"))
    for state in ordered:
        # collect=False: iteration is tracked at domain/all granularity instead.
        yield TemplateState(hass, state, collect=False)
def _get_state_if_valid(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
    """Return the wrapped state, raising on a malformed entity id."""
    state = hass.states.get(entity_id)
    if state is not None:
        return _get_template_state_from_state(hass, entity_id, state)
    if not valid_entity_id(entity_id):
        raise TemplateError(f"Invalid entity ID '{entity_id}'")  # type: ignore
    return _get_template_state_from_state(hass, entity_id, None)
def _get_state(hass: HomeAssistant, entity_id: str) -> TemplateState | None:
    # Wrap the entity's current state; collects the id when the entity is missing.
    return _get_template_state_from_state(hass, entity_id, hass.states.get(entity_id))
def _get_template_state_from_state(
    hass: HomeAssistant, entity_id: str, state: State | None
) -> TemplateState | None:
    """Wrap a State in a TemplateState; collect missing entities by id."""
    if state is not None:
        # Existing states collect lazily on first property access in the wrapper.
        return TemplateState(hass, state)
    # Only need to collect here when the state is missing.
    _collect_state(hass, entity_id)
    return None
def _resolve_state(
    hass: HomeAssistant, entity_id_or_state: Any
) -> State | TemplateState | None:
    """Return state or entity_id if given."""
    if isinstance(entity_id_or_state, State):
        return entity_id_or_state
    if not isinstance(entity_id_or_state, str):
        # Anything else cannot be resolved.
        return None
    return _get_state(hass, entity_id_or_state)
def result_as_boolean(template_result: str | None) -> bool:
    """Convert the template result to a boolean.

    True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
    False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
    """
    # Import here, not at top-level to avoid circular import
    from homeassistant.helpers import (  # pylint: disable=import-outside-toplevel
        config_validation as cv,
    )

    try:
        return cv.boolean(template_result)
    except vol.Invalid:
        return False
def expand(hass: HomeAssistant, *args: Any) -> Iterable[State]:
    """Expand out any groups into entity states."""
    pending = list(args)
    found = {}
    while pending:
        entity = pending.pop()
        if isinstance(entity, str):
            entity_id = entity
            entity = _get_state(hass, entity)
            if entity is None:
                continue
        elif isinstance(entity, State):
            entity_id = entity.entity_id
        elif isinstance(entity, collections.abc.Iterable):
            # Nested iterable: queue its members for expansion.
            pending += entity
            continue
        else:
            # ignore other types
            continue
        if entity_id.startswith(_GROUP_DOMAIN_PREFIX):
            # Collect state will be called in here since it's wrapped
            group_entities = entity.attributes.get(ATTR_ENTITY_ID)
            if group_entities:
                pending += group_entities
        else:
            _collect_state(hass, entity_id)
            found[entity_id] = entity
    return sorted(found.values(), key=attrgetter("entity_id"))
def device_entities(hass: HomeAssistant, device_id: str) -> Iterable[str]:
    """Get entity ids for entities tied to a device."""
    registry = entity_registry.async_get(hass)
    return [
        entry.entity_id
        for entry in entity_registry.async_entries_for_device(registry, device_id)
    ]
def closest(hass, *args):
    """Find closest entity.

    Closest to home:
        closest(states)
        closest(states.device_tracker)
        closest('group.children')
        closest(states.group.children)

    Closest to a point:
        closest(23.456, 23.456, 'group.children')
        closest('zone.school', 'group.children')
        closest(states.zone.school, 'group.children')

    As a filter:
        states | closest
        states.device_tracker | closest
        ['group.children', states.device_tracker] | closest
        'group.children' | closest(23.456, 23.456)
        states.device_tracker | closest('zone.school')
        'group.children' | closest(states.zone.school)
    """
    if len(args) == 1:
        # closest(entities): measure from the configured home location.
        latitude = hass.config.latitude
        longitude = hass.config.longitude
        entities = args[0]
    elif len(args) == 2:
        # closest(entity_or_state, entities): measure from that entity's location.
        point_state = _resolve_state(hass, args[0])
        if point_state is None:
            _LOGGER.warning("Closest:Unable to find state %s", args[0])
            return None
        if not loc_helper.has_location(point_state):
            _LOGGER.warning(
                "Closest:State does not contain valid location: %s", point_state
            )
            return None
        latitude = point_state.attributes.get(ATTR_LATITUDE)
        longitude = point_state.attributes.get(ATTR_LONGITUDE)
        entities = args[1]
    else:
        # closest(lat, lng, entities): measure from explicit coordinates.
        latitude = convert(args[0], float)
        longitude = convert(args[1], float)
        if latitude is None or longitude is None:
            _LOGGER.warning(
                "Closest:Received invalid coordinates: %s, %s", args[0], args[1]
            )
            return None
        entities = args[2]
    states = expand(hass, entities)
    # state will already be wrapped here
    return loc_helper.closest(latitude, longitude, states)
def closest_filter(hass, *args):
    """Call closest as a filter. Need to reorder arguments."""
    # The filtered value arrives first; closest() expects it last.
    return closest(hass, *args[1:], args[0])
def distance(hass, *args):
    """Calculate distance.

    Will calculate distance from home to a point or between points.
    Points can be passed in using state objects or lat/lng coordinates.
    """
    locations = []
    to_process = list(args)
    while to_process:
        value = to_process.pop(0)
        if isinstance(value, str) and not valid_entity_id(value):
            # Not an entity id; treat as a raw coordinate below.
            point_state = None
        else:
            point_state = _resolve_state(hass, value)
        if point_state is None:
            # We expect this and next value to be lat&lng
            if not to_process:
                _LOGGER.warning(
                    "Distance:Expected latitude and longitude, got %s", value
                )
                return None
            value_2 = to_process.pop(0)
            latitude = convert(value, float)
            longitude = convert(value_2, float)
            if latitude is None or longitude is None:
                _LOGGER.warning(
                    "Distance:Unable to process latitude and longitude: %s, %s",
                    value,
                    value_2,
                )
                return None
        else:
            # Entity/state point: read its location attributes.
            if not loc_helper.has_location(point_state):
                _LOGGER.warning(
                    "Distance:State does not contain valid location: %s", point_state
                )
                return None
            latitude = point_state.attributes.get(ATTR_LATITUDE)
            longitude = point_state.attributes.get(ATTR_LONGITUDE)
        locations.append((latitude, longitude))
    if len(locations) == 1:
        # Single point: distance from the configured home location.
        return hass.config.distance(*locations[0])
    # Two points: raw meters converted to the configured unit system.
    return hass.config.units.length(
        loc_util.distance(*locations[0] + locations[1]), LENGTH_METERS
    )
def is_state(hass: HomeAssistant, entity_id: str, state: str) -> bool:
    """Test if a state is a specific value.

    Fix: ``state`` was annotated as ``State`` but it is compared against
    ``state_obj.state`` (the state *value*, always a string), and template
    callers pass strings — annotate it as ``str``.

    Returns False when the entity does not exist.
    """
    state_obj = _get_state(hass, entity_id)
    return state_obj is not None and state_obj.state == state
def is_state_attr(hass, entity_id, name, value):
    """Test if a state's attribute is a specific value."""
    attr = state_attr(hass, entity_id, name)
    if attr is None:
        # Missing entity or missing attribute never matches.
        return False
    return attr == value
def state_attr(hass, entity_id, name):
    """Get a specific attribute from a state, or None if unavailable."""
    state_obj = _get_state(hass, entity_id)
    if state_obj is None:
        return None
    return state_obj.attributes.get(name)
def now(hass):
    """Record fetching now."""
    # Mark the render as time-dependent so it can be re-rendered periodically.
    if (render_info := hass.data.get(_RENDER_INFO)) is not None:
        render_info.has_time = True
    return dt_util.now()
def utcnow(hass):
    """Record fetching utcnow."""
    # Mark the render as time-dependent so it can be re-rendered periodically.
    if (render_info := hass.data.get(_RENDER_INFO)) is not None:
        render_info.has_time = True
    return dt_util.utcnow()
def forgiving_round(value, precision=0, method="common"):
    """Round accepted strings; return the input unchanged when not numeric."""
    try:
        # support rounding methods like jinja
        multiplier = float(10 ** precision)
        number = float(value)
        if method == "ceil":
            rounded = math.ceil(number * multiplier) / multiplier
        elif method == "floor":
            rounded = math.floor(number * multiplier) / multiplier
        elif method == "half":
            # Round to the nearest 0.5 (precision only affects the int cast).
            rounded = round(number * 2) / 2
        else:
            # if method is common or something else, use common rounding
            rounded = round(number, precision)
        return int(rounded) if precision == 0 else rounded
    except (ValueError, TypeError):
        # If value can't be converted to float
        return value
def multiply(value, amount):
    """Filter to convert value to float and multiply it."""
    try:
        product = float(value) * amount
    except (ValueError, TypeError):
        # If value can't be converted to float
        return value
    return product
def logarithm(value, base=math.e):
    """Filter to get logarithm of the value with a specific base."""
    try:
        result = math.log(float(value), float(base))
    except (ValueError, TypeError):
        # Non-numeric input passes through unchanged.
        result = value
    return result
def sine(value):
    """Filter to get sine of the value."""
    try:
        result = math.sin(float(value))
    except (ValueError, TypeError):
        # Non-numeric input passes through unchanged.
        result = value
    return result
def cosine(value):
    """Filter to get cosine of the value."""
    try:
        result = math.cos(float(value))
    except (ValueError, TypeError):
        # Non-numeric input passes through unchanged.
        result = value
    return result
def tangent(value):
    """Filter to get tangent of the value."""
    try:
        result = math.tan(float(value))
    except (ValueError, TypeError):
        # Non-numeric input passes through unchanged.
        result = value
    return result
def arc_sine(value):
    """Filter to get arc sine of the value."""
    try:
        result = math.asin(float(value))
    except (ValueError, TypeError):
        # Non-numeric or out-of-domain input passes through unchanged.
        result = value
    return result
def arc_cosine(value):
    """Filter to get arc cosine of the value."""
    try:
        result = math.acos(float(value))
    except (ValueError, TypeError):
        # Non-numeric or out-of-domain input passes through unchanged.
        result = value
    return result
def arc_tangent(value):
    """Filter to get arc tangent of the value."""
    try:
        result = math.atan(float(value))
    except (ValueError, TypeError):
        # Non-numeric input passes through unchanged.
        result = value
    return result
def arc_tangent2(*args):
    """Filter to calculate four quadrant arc tangent of y / x."""
    try:
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            # Filter form: a single (y, x) pair was piped in.
            args = args[0]
        y_coord = float(args[0])
        x_coord = float(args[1])
        return math.atan2(y_coord, x_coord)
    except (ValueError, TypeError):
        return args
def square_root(value):
    """Filter to get square root of the value."""
    try:
        result = math.sqrt(float(value))
    except (ValueError, TypeError):
        # Non-numeric or negative input passes through unchanged.
        result = value
    return result
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
    """Filter to convert given timestamp to format."""
    try:
        stamp = dt_util.utc_from_timestamp(value)
        if local:
            stamp = dt_util.as_local(stamp)
        return stamp.strftime(date_format)
    except (ValueError, TypeError):
        # If timestamp can't be converted
        return value
def timestamp_local(value):
    """Filter to convert given timestamp to local date/time."""
    try:
        local_dt = dt_util.as_local(dt_util.utc_from_timestamp(value))
        return local_dt.strftime(DATE_STR_FORMAT)
    except (ValueError, TypeError):
        # If timestamp can't be converted
        return value
def timestamp_utc(value):
    """Filter to convert given timestamp to UTC date/time."""
    try:
        utc_dt = dt_util.utc_from_timestamp(value)
        return utc_dt.strftime(DATE_STR_FORMAT)
    except (ValueError, TypeError):
        # If timestamp can't be converted
        return value
def forgiving_as_timestamp(value):
    """Try to convert value to timestamp; None when conversion fails."""
    try:
        timestamp = dt_util.as_timestamp(value)
    except (ValueError, TypeError):
        return None
    return timestamp
def strptime(string, fmt):
    """Parse a time string to datetime; return the input on failure."""
    try:
        parsed = datetime.strptime(string, fmt)
    except (ValueError, AttributeError, TypeError):
        return string
    return parsed
def fail_when_undefined(value):
    """Filter to force a failure when the value is undefined."""
    if isinstance(value, jinja2.Undefined):
        # Calling an Undefined instance raises its UndefinedError.
        value()
    return value
def forgiving_float(value):
    """Try to convert value to a float; return the input on failure."""
    try:
        converted = float(value)
    except (ValueError, TypeError):
        return value
    return converted
def regex_match(value, find="", ignorecase=False):
    """Match value using regex."""
    if not isinstance(value, str):
        value = str(value)
    return bool(re.match(find, value, re.I if ignorecase else 0))
def regex_replace(value="", find="", replace="", ignorecase=False):
    """Replace using regex."""
    if not isinstance(value, str):
        value = str(value)
    pattern = re.compile(find, re.I if ignorecase else 0)
    return pattern.sub(replace, value)
def regex_search(value, find="", ignorecase=False):
    """Search using regex."""
    if not isinstance(value, str):
        value = str(value)
    return re.search(find, value, re.I if ignorecase else 0) is not None
def regex_findall_index(value, find="", index=0, ignorecase=False):
    """Find all matches using regex and then pick specific match index."""
    if not isinstance(value, str):
        value = str(value)
    matches = re.findall(find, value, re.I if ignorecase else 0)
    # Raises IndexError when there is no match at the requested index.
    return matches[index]
def bitwise_and(first_value, second_value):
    """Perform a bitwise and operation."""
    result = first_value & second_value
    return result
def bitwise_or(first_value, second_value):
    """Perform a bitwise or operation."""
    result = first_value | second_value
    return result
def base64_encode(value):
    """Perform base64 encode."""
    encoded = base64.b64encode(value.encode("utf-8"))
    return encoded.decode("utf-8")
def base64_decode(value):
    """Perform base64 decode."""
    decoded = base64.b64decode(value)
    return decoded.decode("utf-8")
def ordinal(value):
    """Perform ordinal conversion (1 -> 1st, 2 -> 2nd, ...)."""
    number = str(value)
    if int(number[-2:]) % 100 in range(11, 14):
        # 11th, 12th, 13th are exceptions to the last-digit rule.
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(int(number[-1]), "th")
    return number + suffix
def from_json(value):
    """Convert a JSON string to an object."""
    parsed = json.loads(value)
    return parsed
def to_json(value):
    """Convert an object to a JSON string."""
    serialized = json.dumps(value)
    return serialized
@contextfilter
def random_every_time(context, values):
    """Choose a random value.

    Unlike Jinja's random filter,
    this is context-dependent to avoid caching the chosen value.
    """
    # The context argument is unused; its presence alone forces Jinja to
    # re-evaluate the filter on every render instead of caching.
    return random.choice(values)
def relative_time(value):
    """
    Take a datetime and return its "age" as a string.

    The age can be in second, minute, hour, day, month or year. Only the
    biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
    be returned.
    Make sure date is not in the future, or else it will return None.
    If the input are not a datetime object the input will be returned unmodified.
    """
    if not isinstance(value, datetime):
        return value
    # Naive datetimes are interpreted as local time.
    reference = value if value.tzinfo else dt_util.as_local(value)
    if dt_util.now() < reference:
        return reference
    return dt_util.get_age(reference)
def urlencode(value):
    """Urlencode dictionary and return as UTF-8 string."""
    encoded = urllib_urlencode(value)
    return encoded.encode("utf-8")
def _render_with_context(
    template_str: str, template: jinja2.Template, **kwargs: Any
) -> str:
    """Store template being rendered in a ContextVar to aid error handling."""
    # Record the source so error paths (which read template_cv) can report it.
    template_cv.set(template_str)
    rendered = template.render(**kwargs)
    return rendered
class LoggingUndefined(jinja2.Undefined):
    """Log on undefined variables."""
    def _log_message(self):
        # template_cv holds the source of the template currently rendering
        # (set by _render_with_context).
        template = template_cv.get() or ""
        _LOGGER.warning(
            "Template variable warning: %s when rendering '%s'",
            self._undefined_message,
            template,
        )
    def _fail_with_undefined_error(self, *args, **kwargs):
        # Log before re-raising jinja2's UndefinedError so the failing
        # template source appears in the log.
        try:
            return super()._fail_with_undefined_error(*args, **kwargs)
        except self._undefined_exception as ex:
            template = template_cv.get() or ""
            _LOGGER.error(
                "Template variable error: %s when rendering '%s'",
                self._undefined_message,
                template,
            )
            raise ex
    def __str__(self):
        """Log undefined __str___."""
        self._log_message()
        return super().__str__()
    def __iter__(self):
        """Log undefined __iter___."""
        self._log_message()
        return super().__iter__()
    def __bool__(self):
        """Log undefined __bool___."""
        self._log_message()
        return super().__bool__()
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def __init__(self, hass, limited=False, strict=False):
"""Initialise template environment."""
if not strict:
undefined = LoggingUndefined
else:
undefined = jinja2.StrictUndefined
super().__init__(undefined=undefined)
self.hass = hass
self.template_cache = weakref.WeakValueDictionary()
self.filters["round"] = forgiving_round
self.filters["multiply"] = multiply
self.filters["log"] = logarithm
self.filters["sin"] = sine
self.filters["cos"] = cosine
self.filters["tan"] = tangent
self.filters["asin"] = arc_sine
self.filters["acos"] = arc_cosine
self.filters["atan"] = arc_tangent
self.filters["atan2"] = arc_tangent2
self.filters["sqrt"] = square_root
self.filters["as_timestamp"] = forgiving_as_timestamp
self.filters["as_local"] = dt_util.as_local
self.filters["timestamp_custom"] = timestamp_custom
self.filters["timestamp_local"] = timestamp_local
self.filters["timestamp_utc"] = timestamp_utc
self.filters["to_json"] = to_json
self.filters["from_json"] = from_json
self.filters["is_defined"] = fail_when_undefined
self.filters["max"] = max
self.filters["min"] = min
self.filters["random"] = random_every_time
self.filters["base64_encode"] = base64_encode
self.filters["base64_decode"] = base64_decode
self.filters["ordinal"] = ordinal
self.filters["regex_match"] = regex_match
self.filters["regex_replace"] = regex_replace
self.filters["regex_search"] = regex_search
self.filters["regex_findall_index"] = regex_findall_index
self.filters["bitwise_and"] = bitwise_and
self.filters["bitwise_or"] = bitwise_or
self.filters["ord"] = ord
self.globals["log"] = logarithm
self.globals["sin"] = sine
self.globals["cos"] = cosine
self.globals["tan"] = tangent
self.globals["sqrt"] = square_root
self.globals["pi"] = math.pi
self.globals["tau"] = math.pi * 2
self.globals["e"] = math.e
self.globals["asin"] = arc_sine
self.globals["acos"] = arc_cosine
self.globals["atan"] = arc_tangent
self.globals["atan2"] = arc_tangent2
self.globals["float"] = forgiving_float
self.globals["as_local"] = dt_util.as_local
self.globals["as_timestamp"] = forgiving_as_timestamp
self.globals["relative_time"] = relative_time
self.globals["timedelta"] = timedelta
self.globals["strptime"] = strptime
self.globals["urlencode"] = urlencode
self.globals["max"] = max
self.globals["min"] = min
if hass is None:
return
# We mark these as a context functions to ensure they get
# evaluated fresh with every execution, rather than executed
# at compile time and the value stored. The context itself
# can be discarded, we only need to get at the hass object.
def hassfunction(func):
"""Wrap function that depend on hass."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(hass, *args[1:], **kwargs)
return contextfunction(wrapper)
self.globals["device_entities"] = hassfunction(device_entities)
self.filters["device_entities"] = contextfilter(self.globals["device_entities"])
if limited:
# Only device_entities is available to limited templates, mark other
# functions and filters as unsupported.
def unsupported(name):
def warn_unsupported(*args, **kwargs):
raise TemplateError(
f"Use of '{name}' is not supported in limited templates"
)
return warn_unsupported
hass_globals = [
"closest",
"distance",
"expand",
"is_state",
"is_state_attr",
"state_attr",
"states",
"utcnow",
"now",
]
hass_filters = ["closest", "expand"]
for glob in hass_globals:
self.globals[glob] = unsupported(glob)
for filt in hass_filters:
self.filters[filt] = unsupported(filt)
return
self.globals["expand"] = hassfunction(expand)
self.filters["expand"] = contextfilter(self.globals["expand"])
self.globals["closest"] = hassfunction(closest)
self.filters["closest"] = contextfilter(hassfunction(closest_filter))
self.globals["distance"] = hassfunction(distance)
self.globals["is_state"] = hassfunction(is_state)
self.globals["is_state_attr"] = hassfunction(is_state_attr)
self.globals["state_attr"] = hassfunction(state_attr)
self.globals["states"] = AllStates(hass)
self.globals["utcnow"] = hassfunction(utcnow)
self.globals["now"] = hassfunction(now)
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
def is_safe_attribute(self, obj, attr, value):
"""Test if attribute is safe."""
if isinstance(obj, (AllStates, DomainStates, TemplateState)):
return attr[0] != "_"
if isinstance(obj, Namespace):
return True
return super().is_safe_attribute(obj, attr, value)
def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
    """Compile the template, caching the all-defaults case."""
    has_overrides = (
        name is not None
        or filename is not None
        or raw is not False
        or defer_init is not False
    )
    if has_overrides:
        # Any non-default keyword argument bypasses the cache. In
        # production we currently never take this path.
        return super().compile(source, name, filename, raw, defer_init)
    compiled = self.template_cache.get(source)
    if compiled is None:
        compiled = self.template_cache[source] = super().compile(source)
    return compiled
_NO_HASS_ENV = TemplateEnvironment(None) # type: ignore[no-untyped-call]
| homeassistant/helpers/template.py | 49,660 | Class to expose all HA states as attributes.
Class to expose a specific HA domain as attributes.
Log on undefined variables.
Holds information about a template render.
Result wrapper class to store render result.
Class to hold a template and manage caching and rendering.
The Home Assistant template environment.
Class to represent a state object in a template.
Wrap a tuple.
Wrapper of a kls that can store render_result.
Log undefined __bool__.
Return the states.
Compare template with another.
Ensure we collect on equality check.
Return the domain state.
Return the states.
Return a property as an attribute for jinja.
Hash code for template.
Initialize a new tuple class.
Initialise.
Instantiate a template.
Initialize all states.
Initialize the domain states.
Initialize template state.
Initialise template environment.
Return all states.
Return the iteration over all the states.
Log undefined __iter__.
Return number of states.
Return number of states.
Create a new tuple class.
Representation of RenderInfo.
Representation of Template.
Representation of All States.
Representation of Domain States.
Representation of Template State.
Return string representation.
Log undefined __str__.
Bind a template to a specific hass instance.
Template should re-render if the entity state changes when we match specific domains or entities.
Template should re-render if the entity state changes when we match specific entities.
Template should re-render if the entity is added or removed with domains watched.
Parse the result.
Store template being rendered in a ContextVar to aid error handling.
Return state or entity_id if given.
State generator for a domain or all states.
Filter to get arc cosine of the value.
Filter to get arc sine of the value.
Filter to get arc tangent of the value.
Filter to calculate four quadrant arc tangent of y / x.
Render given template.
This method must be run in the event loop.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
Render the template and collect an entity filter.
Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
Recursively attach hass to all template instances in list and dict.
Wrap State.attributes.
Perform base64 decode.
Perform base64 encode.
Perform a bitwise and operation.
Perform a bitwise or operation.
Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
As a filter:
states | closest
states.device_tracker | closest
['group.children', states.device_tracker] | closest
'group.children' | closest(23.456, 23.456)
states.device_tracker | closest('zone.school')
'group.children' | closest(states.zone.school)
Call closest as a filter. Need to reorder arguments.
Compile the template.
Wrap State.context.
Filter to get cosine of the value.
Get entity ids for entities tied to a device.
Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
Wrap State.domain.
Return if template is valid.
Wrap State.entity_id.
Intentionally does not collect state
Expand out any groups into entity states.
Filter to force a failure when the value is undefined.
Try to convert value to timestamp.
Try to convert value to a float.
Round accepted strings.
Convert a JSON string to an object.
Generate a result wrapper.
Wrap function that depend on hass.
Test if data structure is a complex template.
Test if attribute is safe.
Test if callback is safe.
Test if a state is a specific value.
Test if a state's attribute is a specific value.
Check if the input is a Jinja2 template.
Wrap State.last_changed.
Wrap State.last_updated.
Filter to get logarithm of the value with a specific base.
Filter to convert value to float and multiply it.
Wrap State.name.
Record fetching now.
Wrap State.object_id.
Perform ordinal conversion.
Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
Find all matches using regex and then pick specific match index.
Match value using regex.
Replace using regex.
Search using regex.
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it will return None.
If the input are not a datetime object the input will be returned unmodified.
Render given template.
If limited is True, the template is not allowed to access any function or filter depending on hass or the state machine.
Recursive template creator helper function.
Render template with value exposed.
If valid JSON will expose value_json too.
Results of the template computation.
Convert the template result to a boolean.
True/not 0/'1'/'true'/'yes'/'on'/'enable' are considered truthy
False/0/None/'0'/'false'/'no'/'off'/'disable' are considered falsy
Filter to get sine of the value.
Filter to get square root of the value.
Wrap State.state.
Get a specific attribute from a state.
Return the state concatenated with the unit if available.
Parse a time string to datetime.
Filter to get tangent of the value.
Filter to convert given timestamp to format.
Filter to convert given timestamp to local date/time.
Filter to convert given timestamp to UTC date/time.
Convert an object to a JSON string.
Urlencode dictionary and return as UTF-8 string.
Record fetching utcnow.
Template helper methods for rendering strings with Home Assistant data.
type: ignore mypy: allow-untyped-defs, no-check-untyped-defs Match "simple" ints and floats. -1.0, 1, +5, 5.0 Can't get set repr to work This is all magic to be allowed to subclass a tuple. pylint: disable=super-init-not-called type: ignore[no-untyped-call] Will be set sensibly once frozen. type: ignore[no-untyped-call] type: ignore[no-untyped-call] pylint: disable=no-self-use If the literal_eval result is a string, use the original render, by not returning right here. The evaluation of strings resulting in strings impacts quotes, to avoid unexpected output; use the original render instead of the evaluated one. Complex and scientific values are also unexpected. Filter them out. Filter out string and complex numbers Pass if not numeric and not a boolean Or it's a boolean (inherit from int) Or if it's a digit pylint: disable=broad-except type: ignore[no-untyped-call] pylint: disable=protected-access Jinja will try __getitem__ first and it avoids the need to call is_safe_attribute Jinja will try __getitem__ first and it avoids the need to call is_safe_attribute Inheritance is done so functions that check against State keep working pylint: disable=super-init-not-called Jinja will try __getitem__ first and it avoids the need to call is_safe_attribute _collect_state inlined here for performance type: ignore Only need to collect if none, if not none collect first actual access to the state properties in the state wrapper. 
Import here, not at top-level to avoid circular import pylint: disable=import-outside-toplevel ignore other types Collect state will be called in here since it's wrapped state will already be wrapped here We expect this and next value to be lat&lng support rounding methods like jinja if method is common or something else, use common rounding If value can't be converted to float If value can't be converted to float If timestamp can't be converted If timestamp can't be converted If timestamp can't be converted We mark these as a context functions to ensure they get evaluated fresh with every execution, rather than executed at compile time and the value stored. The context itself can be discarded, we only need to get at the hass object. Only device_entities is available to limited templates, mark other functions and filters as unsupported. If there are any non-default keywords args, we do not cache. In prodution we currently do not have any instance of this. type: ignore[no-untyped-call] | 8,315 | en | 0.760687 |
import threading, queue
import time
import random
import logging
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-9s) %(message)s',)
NUMBER_OF_THREADS = 4
TIMEOUT_SECONDS = 5
class SampleThread(threading.Thread):
    """Worker thread that simulates work with a random sleep.

    The result message is posted to the queue passed in via
    ``kwargs['queue']`` so the main thread can collect it.
    """

    def __init__(self, group=None, target=None, name=None, id=None, kwargs=None):
        super().__init__(group=group, target=target, name=name)
        self.id = id
        self.kwargs = kwargs
        # Shared result queue; required, so fail fast if kwargs is missing it.
        self.queue = kwargs['queue']

    def run(self):
        """Sleep for a random interval, then report the elapsed time."""
        # BUG FIX: log message said "Tunning" instead of "Running".
        logging.debug(f'Running thread id={self.id}')
        duration = random.uniform(0, 5)
        time.sleep(duration)
        self.queue.put(f'Thread id={self.id} finished running in {duration} seconds')
if __name__ == '__main__':
    print('Starting threads')
    # Hold the running SampleThread instances so we can join them later.
    threads = []
    # Build a single queue shared by every worker for collecting results.
    q = queue.Queue()
    for i in range(NUMBER_OF_THREADS):
        t = SampleThread(id=i, kwargs={'queue': q})
        t.start()
        threads.append(t)
    # Block until every worker has finished.
    logging.debug('Waiting for all threads to finish running')
    # FIX: use a plain loop instead of a list comprehension for side effects.
    for t in threads:
        t.join()
    # FIX: these are threads, not processes.
    logging.debug('All threads are finished running')
    logging.debug('Results')
    while not q.empty():
        logging.debug(q.get())
| multithreading/multithreading_simple.py | 1,368 | do some work here create a list to hold running SampleThread object instances build a single queue to send to all thread objects wait until all threads are finished | 164 | en | 0.903764 |
'''
Class Name: File
Purpose: The purpose of this class is to represent the data of a particular
file in a file system.
'''
class File:
    """Represents the metadata of a single file in a file system."""

    def __init__(self, name=None, directory=None, date=None, fId=None, folderId=None, extension=""):
        self.__name = name            # file name, e.g. "report.txt"
        self.__directory = directory  # containing directory path
        self.__date = date            # last-modified date
        self.__id = fId               # file identifier
        self.__folderId = folderId    # identifier of the containing folder
        self.__mimeType = extension   # MIME type / extension string

    def __repr__(self):
        # BUG FIX: __repr__ must always return a str; returning the raw
        # name raised TypeError when name was None.
        return str(self.getName)

    @property
    def getName(self):
        """Return the name of the file."""
        return self.__name

    @property
    def getDir(self):
        """Return the directory the file is in."""
        return self.__directory

    @property
    def getLastModified(self):
        """Return the date the file was last modified."""
        return self.__date

    def getDetails(self):
        """Return the full file address (directory + name).

        Raises TypeError if either component is None.
        """
        return self.getDir + self.getName

    @property
    def getFileId(self):
        """Return the file identifier."""
        return self.__id

    @property
    def getFolderId(self):
        """Return the identifier of the containing folder."""
        return self.__folderId

    @property
    def getMimeType(self):
        """Return the MIME type / extension string."""
        return self.__mimeType
Purpose: The purpose of this class is represent data of a particular file
in a file system. | 125 | en | 0.919856 |
import csv
import numpy as np
import os
import sys
import time
import jismesh.utils as ju
import pandas as pd
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from common.datastructure.Point import Point
from common.datastructure.Mesh import Mesh
# meshTokyo = Mesh('tokyo','500m')
# GRIDNUMBER = meshTokyo.lonNum * meshTokyo.latNum
# print(meshTokyo.size, GRIDNUMBER)
# InterpolatedStep = 12
def getTimestamps(fileName):
    """Return the ordered timestamps of the first trajectory in fileName.

    Rows are (tid, timestamp, ...); collection stops as soon as a row
    with a different tid appears.
    """
    timestamps = []
    last = ''
    with open(fileName, "r") as rf:
        for row in csv.reader(rf):
            if last and last != row[0]:
                break
            timestamps.append(row[1])
            last = row[0]
    return timestamps
def getMesh(mesh, readFileName, writeFileName):
    """Map every trajectory point to its mesh grid id.

    Reads rows (tid, timestamp, lon, lat) from readFileName and writes
    rows (tid, timestamp, meshid) to writeFileName.
    """
    cnt = 0
    # FIX: use context managers so both files are closed even on error
    # (the writer was previously left open if an exception occurred).
    with open(writeFileName, 'w') as wf, open(readFileName, 'r') as rf:
        for line in csv.reader(rf):
            if cnt % 1000000 == 0:
                print(cnt)  # coarse progress indicator
            tid = line[0]
            timestamp = line[1]
            p = Point(float(line[2]), float(line[3]))
            meshid = mesh.inWhichGrid(p)
            wf.write(','.join([tid, timestamp, str(meshid)]) + '\n')
            cnt += 1
def genMeshDynamic(mesh, fileName, meshFileName):
    """Aggregate per-(timestamp, meshid) population counts.

    fileName rows are (tid, timestamp, meshid); meshFileName receives one
    row (timestamp, meshid, population) for every timestamp/grid pair.
    """
    MD = {}
    with open(fileName, "r") as rf:
        for line in csv.reader(rf):
            tid = line[0]
            key = (line[1], line[2])  # (timestamp, meshid)
            if key in MD:
                MD[key].add(tid)
            else:
                # BUG FIX: set(tid) built a set of the id string's
                # characters; a one-element set {tid} is what was intended.
                MD[key] = {tid}
    Timestamps = getTimestamps(fileName)
    with open(meshFileName, 'w') as wf:
        for ts in Timestamps:
            for meshid in range(mesh.lonNum * mesh.latNum):
                key = (ts, str(meshid))
                value = len(MD[key]) if key in MD else 0
                wf.write(','.join([key[0], key[1], str(value)]) + '\n')
def getGrids(fileName):
    """Return the grid-id sequence of the first trajectory in fileName.

    Rows are (tid, grid, ...); collection stops when the tid changes.
    """
    grids = []
    previous = ''
    with open(fileName, "r") as rf:
        for row in csv.reader(rf):
            if previous and previous != row[0]:
                break
            grids.append(row[1])
            previous = row[0]
    return grids
def getDynamicMesh_mobmap(trajFileName, dynamicFileName, meshcode_level):
    """Write a mobmap @dynamic-mesh file of per-meshcode counts per timestamp.

    trajFileName rows are (tid, timestamp, lon, lat); points are binned
    into JIS mesh codes at the given level.
    """
    Timestamps = getTimestamps(trajFileName)
    TIMENUMBER = len(Timestamps)
    TS = {Timestamps[i]: i for i in range(TIMENUMBER)}
    print('getDynamicMesh Started : ', time.ctime())
    # R[t] maps meshcode -> population at timestep t.
    R = [{} for _ in range(TIMENUMBER)]
    with open(trajFileName, 'r') as rf:
        for line in csv.reader(rf):
            timestamp = line[1]
            lon = float(line[2])
            lat = float(line[3])
            meshcode = ju.to_meshcode(lat, lon, meshcode_level)
            if meshcode in R[TS[timestamp]]:
                R[TS[timestamp]][meshcode] += 1
            else:
                R[TS[timestamp]][meshcode] = 1
    print('getDynamicMesh Count Ended : ', time.ctime())
    with open(dynamicFileName, 'w') as wf:
        wf.write("@dynamic-mesh\n")
        # BUG FIX: the header line lacked a trailing newline, fusing it
        # with the first data record.
        wf.write("@use-mesh-code," + str(meshcode_level) + "\n")
        for i in range(len(R)):
            timestamp = Timestamps[i]
            for meshcode in R[i]:
                wf.write(','.join([timestamp, meshcode, str(R[i][meshcode])]) + '\n')
    print('getDynamicMesh Ended : ', time.ctime())
def getDynamicMeshMobmap(trajFileName, dynamicFileName, meshcode_level):
    """Write a mobmap @dynamic-mesh file of per-meshcode counts per timestamp.

    Near-duplicate of getDynamicMesh_mobmap (kept for compatibility);
    trajFileName rows are (tid, timestamp, lon, lat).
    """
    Timestamps = getTimestamps(trajFileName)
    TIMENUMBER = len(Timestamps)
    TS = {Timestamps[i]: i for i in range(TIMENUMBER)}
    print('getDynamicMesh Started : ', time.ctime())
    # R[t] maps meshcode -> population at timestep t.
    R = [{} for _ in range(TIMENUMBER)]
    with open(trajFileName, 'r') as rf:
        for line in csv.reader(rf):
            timestamp = line[1]
            lon = float(line[2])
            lat = float(line[3])
            meshcode = ju.to_meshcode(lat, lon, meshcode_level)
            if meshcode in R[TS[timestamp]]:
                R[TS[timestamp]][meshcode] += 1
            else:
                R[TS[timestamp]][meshcode] = 1
    with open(dynamicFileName, 'w') as wf:
        wf.write("@dynamic-mesh\n")
        # BUG FIX: the header line lacked a trailing newline, fusing it
        # with the first data record.
        wf.write("@use-mesh-code," + str(meshcode_level) + "\n")
        for i in range(len(R)):
            timestamp = Timestamps[i]
            for meshcode in R[i]:
                wf.write(','.join([timestamp, meshcode, str(R[i][meshcode])]) + '\n')
    print('getDynamicMesh Ended : ', time.ctime())
def getRfromDynamicMeshMobmap(meshcode_level, dynamicFileName, dynamicFileName1, dynamicFileName2):
    """Write log10- and ln-scaled copies of a mobmap dynamic-mesh file.

    dynamicFileName1 receives populations as 100*log10(pop+1);
    dynamicFileName2 receives 100*ln(pop+1).
    """
    header = "@dynamic-mesh\n" + "@use-mesh-code," + str(meshcode_level) + '\n'
    log10_df = pd.read_csv(dynamicFileName, header=None, skiprows=2)
    log10_df.iloc[:, 2] = np.log10(log10_df.iloc[:, 2] + 1) * 100
    ln_df = pd.read_csv(dynamicFileName, header=None, skiprows=2)
    ln_df.iloc[:, 2] = np.log(ln_df.iloc[:, 2] + 1) * 100
    for out_name in (dynamicFileName1, dynamicFileName2):
        with open(out_name, 'w') as wf:
            wf.write(header)
    log10_df.to_csv(dynamicFileName1, header=False, index=False, mode='a')
    ln_df.to_csv(dynamicFileName2, header=False, index=False, mode='a')
def getDynamicMeshMobmapR(R, trajFileName, dynamicFileName, meshcode_level):
    """Write precomputed per-timestep meshcode counts R as a mobmap dynamic mesh.

    R[i] maps meshcode -> population for the i-th timestamp of
    trajFileName's first trajectory.
    """
    Timestamps = getTimestamps(trajFileName)
    print('getDynamicMesh Count Ended : ', time.ctime())
    with open(dynamicFileName, 'w') as wf:
        wf.write("@dynamic-mesh\n")
        # BUG FIX: the header line lacked a trailing newline, fusing it
        # with the first data record.
        wf.write("@use-mesh-code," + str(meshcode_level) + "\n")
        for i in range(len(R)):
            timestamp = Timestamps[i]
            for meshcode in R[i]:
                wf.write(','.join([timestamp, meshcode, str(R[i][meshcode])]) + '\n')
    print('getDynamicMesh Ended : ', time.ctime())
def genMeshDynamicTimeInterval(fileName, meshFileName, startTimestamp, endTimestamp):
    """Extract rows of meshFileName whose timestamp lies in [start, end).

    The output file name appends "_HHMM_HHMM" (start/end) before ".csv".
    """

    def strHH(timestamp):
        # "YYYY-MM-DD HH:MM:SS" -> "HHMM"
        return timestamp[11:13] + timestamp[14:16]

    Timestamps = getTimestamps(fileName)
    first = Timestamps.index(startTimestamp)
    last = Timestamps.index(endTimestamp)
    wanted = set(Timestamps[first:last])
    out_name = meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv'
    with open(out_name, 'w') as wf, open(meshFileName, 'r') as rf:
        for row in csv.reader(rf):
            if row[0] in wanted:
                wf.write(','.join(row) + '\n')
def genMeshDynamicTimeInterval_Mobmap(fileName, meshFileName, startTimestamp, endTimestamp):
    """Extract a mobmap dynamic-mesh time slice to a new file.

    Header lines are copied through; data rows are kept only when their
    timestamp lies in [startTimestamp, endTimestamp).
    """

    def strHH(timestamp):
        # "YYYY-MM-DD HH:MM:SS" -> "HHMM"
        return timestamp[11:13] + timestamp[14:16]

    Timestamps = getTimestamps(fileName)
    startIndex = Timestamps.index(startTimestamp)
    endIndex = Timestamps.index(endTimestamp)
    Interval = set(Timestamps[startIndex:endIndex])
    out_name = meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv'
    with open(out_name, 'w') as wf, open(meshFileName, 'r') as rf:
        for line in csv.reader(rf):
            # BUG FIX: the original condition
            # `line[0] == '@dynamic-mesh' or '"@use-mesh-code'` was always
            # true (non-empty string literal) and then wrote the raw list
            # object, raising TypeError.
            if line[0] in ('@dynamic-mesh', '@use-mesh-code'):
                wf.write(','.join(line) + '\n')
            elif line[0] in Interval:
                wf.write(','.join(line) + '\n')
def genMeshDynamicMobmap(mesh, meshFileName, mobmapFile, timestamp):
    """Export one timestamp of a mesh-population file as a mobmap static mesh.

    meshFileName rows are (timestamp, meshid, population); the output is
    '@static-mesh', the mesh geometry line, then (yi, xi, population) rows.
    """
    with open(mobmapFile, 'w') as wf:
        wf.write('@static-mesh' + '\n')
        geometry = [mesh.minLat, mesh.minLon, mesh.dLat, mesh.dLon]
        wf.write(','.join(str(v) for v in geometry) + '\n')
        with open(meshFileName, 'r') as rf:
            for row in csv.reader(rf):
                if row[0] != timestamp:
                    continue
                xi, yi = mesh.Index[int(row[1])]
                wf.write(','.join(str(v) for v in [yi, xi, row[2]]) + '\n')
def loadGTrajectory(fileName):
    """Load grid-id sequences keyed by trajectory id.

    fileName rows are (tid, timestamp, meshid); returns
    {tid: [meshid, ...]} in file order.
    """
    print('loadTrajectory Started : ', time.ctime())
    TDB = {}
    with open(fileName, 'r') as rf:
        for row in csv.reader(rf):
            TDB.setdefault(row[0], []).append(row[2])
    print('loadTrajectory Ended : ', time.ctime())
    return TDB
def getINDEX(mesh, gTrajFileName):
    """Build INDEX[t][g]: the set of trajectory ids in grid g at timestep t.

    gTrajFileName rows are (tid, timestamp, meshid).
    NOTE(review): assumes every trajectory has exactly one point per
    timestamp, so position i in a trajectory equals timestep i — confirm.
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    print('getTrajectoryINDEX Started : ', time.ctime())
    Timestamps = getTimestamps(gTrajFileName)
    print('timestamps...', len(Timestamps))
    TDB = loadGTrajectory(gTrajFileName)
    # One id-set per (timestep, grid) pair.
    INDEX = []
    for i in range(len(Timestamps)):
        INDEX.append([])
        for G in range(GRIDNUMBER):
            INDEX[i].append(set())  # set().add
    # print(np.array(INDEX).shape)
    for tid in TDB:
        traj = TDB[tid]
        for i in range(len(traj)):
            HH = i
            if traj[i] == 'None':
                # Point fell outside the mesh; skip it.
                pass
            else:
                gid = int(traj[i])
                INDEX[HH][gid].add(tid)  # set().add
    return INDEX
def getGridImageIndex(mesh, window=15):
    """For each grid cell, build a window x window map of neighbour grid ids.

    Entry [j][i] holds the grid id at the corresponding (dx, dy) offset
    from the cell, or -1 when the offset falls outside the mesh; rows are
    flipped so the largest-y row comes first.
    """
    half = window // 2
    offsets = list(range(-half, window - half))
    IMG = []
    for g in range(mesh.lonNum * mesh.latNum):
        cx, cy = mesh.Index[g]
        patch = np.zeros((window, window), dtype='int32')
        for i, dx in enumerate(offsets):
            for j, dy in enumerate(offsets):
                x, y = cx + dx, cy + dy
                patch[j][i] = mesh.ReverseIndex[(x, y)] if mesh.inMesh(x, y) else -1
        IMG.append(patch[::-1, :])
    return IMG
def genGridTransit(mesh, gTrajFileName, transitFileName):
    """Count movers between each grid and its 15x15 neighbourhood per step.

    For every consecutive timestep pair (t, t+1) and every grid j, writes
    one CSV row (t, j, v0, v1, ...) where each v is the number of
    trajectory ids present in grid j at t and in the corresponding
    neighbour grid at t+1.
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    print('genGridTransit Started : ', time.ctime())
    transitWriteFile = open(transitFileName, 'w')
    INDEX = getINDEX(mesh, gTrajFileName)
    Timestamps = getTimestamps(gTrajFileName)
    GridImageIndex = getGridImageIndex(mesh)
    print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
    # -1 below: a transit needs a "next" timestep.
    for i in range(len(Timestamps) - 1):
        for j in range(GRIDNUMBER):
            cur_time = i
            next_time = i + 1
            cur_grid = j
            transitgrids = GridImageIndex[cur_grid]
            Transit = np.zeros(transitgrids.shape, dtype='int32')
            for ii in range(transitgrids.shape[0]):
                for jj in range(transitgrids.shape[1]):
                    next_grid = transitgrids[ii][jj]
                    if next_grid != -1:
                        # Movers = ids in cur_grid at t AND next_grid at t+1.
                        trajfirst = INDEX[cur_time][cur_grid]
                        trajsecond = INDEX[next_time][next_grid]
                        transit_num = len(trajfirst & trajsecond)
                        Transit[ii][jj] = transit_num
                    else:
                        pass
            FlattedTransit = Transit.reshape(-1).tolist()
            lineitem = [str(i), str(j)]
            lineitem.extend([str(t) for t in FlattedTransit])
            line = ','.join(lineitem) + '\n'
            transitWriteFile.write(line)
        print('genGridTransit timestamp: ', i)
    transitWriteFile.close()
    print('genGridTransit Ended: ', time.ctime())
# This grid transit version is for 1minutes trajectory, more accurate, not for 5minutes.
# !!!!!!!!!!!!!!!!!!!! 1 minute trajectory data.
# TT is supposed to be 288 not 289 because it is interval.
def genGridTransit_5minutes_from_1minute(mesh, gTrajFileName, transitFileName):
    """Aggregate 5-minute grid transits from a 1-minute trajectory file.

    For each of the 288 five-minute intervals and each grid, counts ids
    observed in the grid at some minute p of the interval and in the
    neighbour grid at some later minute q >= p of the same interval.
    Writes one CSV row (interval, grid, v0, v1, ...) per pair.
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    print('genGridTransit Started : ', time.ctime())
    transitWriteFile = open(transitFileName, 'w')
    INDEX = getINDEX(mesh, gTrajFileName)
    # Timestamps = getTimestamps(gTrajFileName)
    GridImageIndex = getGridImageIndex(mesh)
    print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
    # 288 intervals of SPAN minutes each (one day of 1-minute data).
    TT, SPAN = 24 * 12, 5
    for i in range(TT):
        for j in range(GRIDNUMBER):
            cur_time = i
            cur_grid = j
            transitgrids = GridImageIndex[cur_grid]
            Transit = np.zeros(transitgrids.shape, dtype='int32')
            for ii in range(transitgrids.shape[0]):
                for jj in range(transitgrids.shape[1]):
                    next_grid = transitgrids[ii][jj]
                    if next_grid != -1:
                        # Minute range [start, end) covered by this interval,
                        # inclusive of the first minute of the next interval.
                        cur_time_start = cur_time * SPAN
                        cur_time_end = (cur_time + 1) * SPAN + 1
                        SS = set()
                        for pp in range(cur_time_start, cur_time_end):
                            trajfirst = INDEX[pp][cur_grid]
                            for qq in range(pp, cur_time_end):
                                trajsecond = INDEX[qq][next_grid]
                                SS.update(trajfirst & trajsecond)
                        transit_num = len(SS)
                        Transit[ii][jj] = transit_num
                    else:
                        pass
            FlattedTransit = Transit.reshape(-1).tolist()
            lineitem = [str(i), str(j)]
            lineitem.extend([str(t) for t in FlattedTransit])
            line = ','.join(lineitem) + '\n'
            transitWriteFile.write(line)
        print('genGridTransit timestamp: ', i)
    transitWriteFile.close()
    print('genGridTransit Ended: ', time.ctime())
def getGridTransit(mesh, gTrajFileName, transitFileName):
    """Load a transit CSV into a (time, lat, lon, window*window) int array.

    gTrajFileName supplies the timestamp count; transitFileName rows are
    (timestep, grid, v0, v1, ...) as written by genGridTransit.
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    Timestamps = getTimestamps(gTrajFileName)
    TIMENUMBER = len(Timestamps) - 1  # -1 is because of transit
    print('getGridTransit Started : ', time.ctime())
    R = []
    for i in range(TIMENUMBER):
        R.append([])
        for j in range(GRIDNUMBER):
            R[i].append([])
    with open(transitFileName, 'r') as rf:
        tansistReader = csv.reader(rf)
        for line in tansistReader:
            timestamp = int(line[0])
            grid = int(line[1])
            R[timestamp][grid] = line[2:]
    R = np.array(R, dtype='int32')  # 144, 6000, 225
    # Grid ids are lon-major; swap to (lat, lon) and put north row first.
    R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
    R = np.swapaxes(R, 2, 1)
    R = R[:, ::-1, :, :]  # 144, 75, 80, 225
    return R
def getGridPop(mesh, gTrajFileName, popFileName):
    """Load per-grid populations into a (time, lat, lon, 1) int array.

    gTrajFileName supplies the timestamp ordering; popFileName rows are
    (timestamp, grid, population).
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    Timestamps = getTimestamps(gTrajFileName)
    TIMENUMBER = len(Timestamps)
    TS = {Timestamps[i]: i for i in range(TIMENUMBER)}
    print('getGridPop Started : ', time.ctime())
    R = [[[] for _ in range(GRIDNUMBER)] for _ in range(TIMENUMBER)]
    with open(popFileName, 'r') as rf:
        for line in csv.reader(rf):
            R[TS[line[0]]][int(line[1])] = int(line[2])
    R = np.array(R, dtype='int32')  # (time, grids)
    # FIX: reshape with the real mesh dimensions instead of sqrt(grids),
    # which only worked for square meshes (consistent with getGridTransit).
    R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, 1)
    R = np.swapaxes(R, 2, 1)
    R = R[:, ::-1, :, :]  # (time, lat, lon, 1), north row first
    return R
def getGridPopPartition(R, M, K):
    """Split a (T, N, N, C) array into M*M spatial tiles of size K x K.

    N must equal M*K; tiles are returned row-major as an array of shape
    (M*M, T, K, K, C).
    """
    tiles = [
        R[:, r * K:(r + 1) * K, c * K:(c + 1) * K, :]
        for r in range(M)
        for c in range(M)
    ]
    return np.array(tiles)
def getGridPop2DNumpy(mesh, gTrajFileName, popFileName):
    """Load per-grid populations into a flat (time, grids) int array.

    Same loading logic as getGridPop but without the spatial reshape.
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    Timestamps = getTimestamps(gTrajFileName)
    TIMENUMBER = len(Timestamps)
    TS = {}
    for i in range(TIMENUMBER):
        TS[Timestamps[i]] = i
    print('getGridPop Started : ', time.ctime())
    R = []
    for i in range(TIMENUMBER):
        R.append([])
        for j in range(GRIDNUMBER):
            R[i].append([])
    with open(popFileName, 'r') as rf:
        tansistReader = csv.reader(rf)
        for line in tansistReader:
            timestamp = TS[line[0]]
            grid = int(line[1])
            R[timestamp][grid] = int(line[2])
    R = np.array(R, dtype='int32')  # shape 145, 6000
    return R
def getGridPopTimeInterval(mesh, popFileName):
    """Load a mesh-population CSV into a (time, lat, lon, 1) int array.

    Timestamps are taken from popFileName itself (first-seen order);
    rows are (timestamp, grid, population).
    """
    print('getGridPop', popFileName, time.ctime())
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    # First pass: collect the distinct timestamps in file order.
    Timestamps = []
    lastTimestamp = ''
    with open(popFileName, 'r') as rf:
        for line in csv.reader(rf):
            if line[0] != lastTimestamp:
                Timestamps.append(line[0])
                lastTimestamp = line[0]
    TS = {ts: i for i, ts in enumerate(Timestamps)}
    # Second pass: fill the (time, grid) table.
    R = [[[] for _ in range(GRIDNUMBER)] for _ in range(len(Timestamps))]
    with open(popFileName, 'r') as rf:
        for line in csv.reader(rf):
            R[TS[line[0]]][int(line[1])] = int(line[2])
    R = np.array(R, dtype='int32')  # (time, grids)
    # FIX: reshape with the real mesh dimensions instead of sqrt(grids),
    # which only worked for square meshes (consistent with getGridTransit).
    R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, 1)
    R = np.swapaxes(R, 2, 1)
    R = R[:, ::-1, :, :]  # (time, lat, lon, 1), north row first
    return R
def getGridTransitTimeInterval(mesh, transitFileName):
    """Load a transit CSV into a (time, lat, lon, window*window) int array.

    transitFileName rows are (timestep, grid, v0, v1, ...); the file must
    cover all 288 five-minute timesteps for every grid.
    """
    print('getGridTransit Started : ', transitFileName, time.ctime())
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    TIMENUMBER = 24 * 12  # one day of 5-minute steps, hard-coded
    R = [[[] for _ in range(GRIDNUMBER)] for _ in range(TIMENUMBER)]
    with open(transitFileName, 'r') as rf:
        for line in csv.reader(rf):
            R[int(line[0])][int(line[1])] = line[2:]
    R = np.array(R, dtype='int32')  # (time, grids, window*window)
    # Grid ids are lon-major; swap to (lat, lon) and put north row first.
    R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
    R = np.swapaxes(R, 2, 1)
    R = R[:, ::-1, :, :]  # (time, lat, lon, window*window)
    return R
def shuffleTrainValidateTest(InterpolatedStep, path, fileName, R, testRate=0.2):
    """Cut R into overlapping windows, shuffle, and save train/test splits.

    Each sample is 2*InterpolatedStep consecutive frames of R (shape
    (time, H, W, C)); the shuffled samples are written to
    path + 'train_' + fileName + '.npy' and path + 'test_' + fileName + '.npy'.
    """
    TIMESTEP = InterpolatedStep * 2
    Sequence = np.array(
        [R[i:i + TIMESTEP, :, :, :] for i in range(R.shape[0] - TIMESTEP)],
        dtype='int32',
    )
    INDEX = list(range(len(Sequence)))
    # One shuffle suffices; the original shuffled three times redundantly.
    np.random.shuffle(INDEX)
    split = int(len(INDEX) * (1 - testRate))
    train = Sequence[INDEX[:split]]
    test = Sequence[INDEX[split:]]
    np.save(path + 'train_' + fileName, train)
    np.save(path + 'test_' + fileName, test)
    print(train.shape, test.shape)
def getShuffledTrainTest(path, fileName, TrainTest):
    """Load a previously saved split ('train' or 'test') for fileName."""
    saved = '{}{}_{}.npy'.format(path, TrainTest, fileName)
    return np.load(saved)
def testcode(mesh):
    """Ad-hoc debug routine for inspecting mesh index layout.

    NOTE(review): dead debug code — R is a 5x5 window so the later
    `print(R[54][4178])` raises IndexError, and the 3369/(2,2) constants
    are tied to one specific mesh. Do not call in production.
    """
    GRIDNUMBER = mesh.lonNum * mesh.latNum
    window = 5
    R = np.zeros((window, window), dtype='int32')
    center = mesh.ReverseIndex[(2,2)]
    current_x, current_y = mesh.Index[center]
    start = 0 - window // 2
    end = window + start
    for i, dx in enumerate(list(range(start, end))):
        for j, dy in enumerate(list(range(start, end))):
            x = current_x + dx
            y = current_y + dy
            if mesh.inMesh(x, y):
                grid = mesh.ReverseIndex[(x, y)]
                R[j][i] = grid
            else:
                R[j][i] = -1
    # Flip so the largest-y row prints first.
    R = R[::-1, :]
    print(R)
    for i in range(len(R)):
        print(R[i])
    for i in range(len(R)):
        print(R[i][0], R[i][1], R[i][2], R[i][3], R[i][4])
    T = R.reshape(-1)
    print(T.tolist())
    P = T.reshape(window, window)
    print(P)
    print(R.shape)
    # NOTE(review): R is 5x5 — this line raises IndexError.
    print(R[54][4178])
    print(np.max(R) == 3369)
    print(mesh.Index[3369])
    x, y = mesh.Index[3369]
    # Grid-center coordinates of grid 3369.
    lon, lat = mesh.minLon + (x + 0.5) * mesh.dLon, \
        mesh.minLat + (y + 0.5) * mesh.dLat
    print(lon, lat)
    print(mesh.lonNum, mesh.latNum)
    T = np.array(range(GRIDNUMBER))
    T = T.reshape(mesh.lonNum, mesh.latNum)
    T = np.swapaxes(T, 1, 0)
    T = T[::-1, :]
    print(T)
    print(T.shape)
def run5min201802(mesh, dataPATH, dates):
    """Run the 5-minute pipeline (grid mapping, population, transit) per date."""
    print('Now is getting trainig XS and YS...', dates)
    for date in dates:
        # BUG FIX: getMesh, genMeshDynamic and genGridTransit all take the
        # mesh as their first argument; it was missing from every call
        # below (each call raised TypeError).
        # first step: from trajectory point to mesh
        getMesh(mesh, dataPATH + date + 'tokyo_interpo5min.csv',
                dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv')
        # second step: calculate mesh population at each timestamp
        genMeshDynamic(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_pop.csv')
        # third step: mesh transit between two consecutive timestamps
        genGridTransit(mesh, dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
                       dataPATH + date + 'tokyo_' + mesh.size + '_5min_transit.csv')
def getHHTransit(HH):
    """Return the transit tensors of hour HH, reshaped per grid.

    NOTE(review): getGridTransit takes (mesh, gTrajFileName,
    transitFileName) but is called here with a single path, so this
    function raises TypeError as written — confirm the intended
    arguments before use.
    """
    assert HH <= 22, 'Hour should not be over 22.'
    dataPATH = '../interpo_data/'
    date = '20111020'
    R = getGridTransit(dataPATH + date + 'tokyo_meshtransit10min_1min_15.csv')
    # (144, 72, 80, 225)
    R = R[HH*6:HH*6+6, :, :, :]
    # (6, 72, 80, 225)
    R = R.reshape(R.shape[0], -1, R.shape[-1])
    # (6, 5760, 225)
    R = R.transpose(1, 0, 2)
    # (5760, 6, 225)
    R = R.reshape(R.shape[0], R.shape[1], int(R.shape[2]**0.5), int(R.shape[2]**0.5), 1)
    return R
def runCrowdDensity():
    """Compute per-grid crowd density for each sample date (Feb 2011)."""
    dataPATH = '../interpo_data/'
    meshTokyo = Mesh('tokyo', '500m')
    alldates = ["20110217","20110218","20110219","20110220", "20110221",
                "20110222","20110223", "20110224", "20110225", "20110226", "20110227"]
    for date in alldates:
        print('this is date', date)
        grid_file = dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv'
        getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo5min.csv', grid_file)
        genMeshDynamic(meshTokyo, grid_file,
                       dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_pop.csv')
# def runCrowdFlow_from5min():
# from common.dataparam.Param import alldates
# dataPATH = '../interpo_data/'
# meshTokyo = Mesh('tokyo', '500m')
# #meshcode_level = 4
#
# for date in alldates:
# print('this is date', date)
# genGridTransit(meshTokyo,
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv')
# paper crowd flow is from 1min.!!!!!!!!!!!!
def runCrowdFlow():
    """Compute 5-minute crowd flow from 1-minute trajectories per date."""
    dataPATH = '../interpo_data/'
    meshTokyo = Mesh('tokyo', '500m')
    alldates = ["20110217", "20110218", "20110219", "20110220", "20110221",
                "20110222", "20110223", "20110224", "20110225", "20110226", "20110227"]
    for date in alldates:
        print('this is date', date)
        grid_file = dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv'
        getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo1min.csv', grid_file)
        genGridTransit_5minutes_from_1minute(
            meshTokyo, grid_file,
            dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit.csv')
def main():
    # Entry point: only the density pipeline runs by default; the flow
    # pipeline (runCrowdFlow) must be invoked manually when needed.
    runCrowdDensity()
if __name__ == '__main__':
    main()
return train, validate, test timestamp = '2011-10-20 09:00:00' filenameTime = timestamp[0:4] + timestamp[5:7] + timestamp[8:10] \ + timestamp[11:13] + timestamp[14:16] + timestamp[17:19] print(filenameTime) first step: from trajectory point to mesh second step: calculate mesh population at each timestamp fourth step: mesh transit between two consecutive timestamps (144, 72, 80, 225) (6, 72, 80, 225) (6, 5760, 225) (5760, 6, 225)meshcode_level = 4 def runCrowdFlow_from5min(): from common.dataparam.Param import alldates dataPATH = '../interpo_data/' meshTokyo = Mesh('tokyo', '500m') meshcode_level = 4 for date in alldates: print('this is date', date) genGridTransit(meshTokyo, dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv', dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv') paper crowd flow is from 1min.!!!!!!!!!!!!meshcode_level = 4 | 2,587 | en | 0.568947 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test the quoted APOGEE uncertainties from individual (rebinned) spectra. """
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from glob import glob
from itertools import combinations
def get_differences(apStar_filename):
    """Return normalized flux differences (F_i - F_j)/sqrt(s_i^2 + s_j^2)
    for every unordered pair of visits in an apStar file.

    Visit spectra start at HDU data row 2 (rows 0-1 hold the combined
    spectra), so visit k lives at row k + 2. Pixels are kept only where
    both fluxes are positive, flux and sigma are finite, and both mask
    values are 0.
    """
    hdulist = fits.open(apStar_filename)
    n_visits = hdulist[0].header["NVISITS"]
    flux_hdu, error_hdu, mask_hdu = 1, 2, 3
    normed = []
    # Each pair of visits exactly once (combinations, not permutations).
    for a, b in combinations(range(n_visits), 2):
        flux_a = hdulist[flux_hdu].data[a + 2, :]
        flux_b = hdulist[flux_hdu].data[b + 2, :]
        sigma = np.sqrt(hdulist[error_hdu].data[a + 2, :]**2
                        + hdulist[error_hdu].data[b + 2, :]**2)
        keep = (flux_a > 0) * (flux_b > 0) \
            * np.isfinite(flux_a * flux_b * sigma) \
            * (hdulist[mask_hdu].data[a + 2, :] == 0) \
            * (hdulist[mask_hdu].data[b + 2, :] == 0)
        normed.extend(((flux_a - flux_b) / sigma)[keep])
    return np.array(normed).flatten()
def plot_differences(differences):
    """Histogram the normalized differences with a scaled unit Gaussian
    overlaid, and return the figure.

    NOTE(review): the overlay divides by sqrt(2*pi) a second time on top
    of the normal pdf normalization -- presumably deliberate scaling of
    the area match; confirm against the expected curve height.
    """
    fig, ax = plt.subplots(1)
    counts, edges, _ = ax.hist(differences, bins=100, facecolor="#666666")
    lo, hi = ax.get_xlim()
    grid = np.linspace(lo, hi, 1000)
    pdf = np.exp(-0.5 * grid**2) / np.sqrt(2 * np.pi)
    scale = np.trapz(counts, x=edges[1:]) / np.sqrt(2 * np.pi)
    ax.plot(grid, pdf * scale, lw=2, c="r")
    ax.set_title("mu = {0:.1f}, sigma(|d|) = {1:.1f}".format(
        np.median(differences), np.std(np.abs(differences))))
    ax.set_xlabel("(F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)")
    return fig
if __name__ == "__main__":
    # Collect pairwise visit differences for every apStar file, save one
    # diagnostic histogram per file, then a combined histogram over all files.
    filenames = glob("APOGEE/*.fits")
    all_differences = []
    for filename in filenames:
        differences = get_differences(filename)
        if len(differences) > 0:
            fig = plot_differences(differences)
            # PNG named after the FITS file's basename (extension stripped).
            fig.savefig("APOGEE/{0}.png".format(filename.split("/")[-1].split(".")[0]))
            plt.close("all")
            print(filename)
            all_differences.extend(differences)
    fig = plot_differences(np.array(all_differences))
    fig.savefig("APOGEE/all.png")
| data/check_apogee_spectra.py | 2,167 | Test the quoted APOGEE uncertainties from individual (rebinned) spectra.
!/usr/bin/env python -*- coding: utf-8 -*- Generate all permutations. | 144 | en | 0.687536 |
from __future__ import annotations
import abc
from dataclasses import asdict as asdict_, fields, is_dataclass
from pathlib import Path
from typing import Dict, Union, Tuple
from pprint import pformat
from covid_shared import ihme_deps
import numpy as np
import pandas as pd
import yaml
class YamlIOMixin:
    """Shared helpers for reading and writing yaml files."""
    @staticmethod
    def _coerce_path(path: Union[str, Path]) -> Path:
        """Normalize *path* to a Path and require a .yaml/.yml suffix."""
        coerced = Path(path)
        if coerced.suffix in ('.yaml', '.yml'):
            return coerced
        raise ValueError('Path must point to a yaml file. '
                         f'You provided {str(coerced)}')
    @classmethod
    def _load(cls, path: Union[str, Path]):
        """Read and parse the yaml document at *path*."""
        with cls._coerce_path(path).open() as f:
            return yaml.full_load(f)
    @classmethod
    def _dump(cls, data, path: Union[str, Path]) -> None:
        """Serialize *data* as yaml to *path*, preserving key order."""
        with cls._coerce_path(path).open('w') as f:
            yaml.dump(data, f, sort_keys=False)
class Specification(YamlIOMixin):
    """Abstract base for pipeline-stage specifications.

    Concrete subclasses supply ``parse_spec_dict``/``to_dict`` to map
    between constructor arguments and a plain dictionary; this base
    layers yaml round-tripping on top of that mapping.
    """
    @classmethod
    def from_path(cls, specification_path: Union[str, Path]) -> Specification:
        """Builds the specification from a file path."""
        return cls.from_dict(cls._load(specification_path))
    @classmethod
    def from_dict(cls, spec_dict: Dict) -> Specification:
        """Builds the specification from a dictionary."""
        init_args = cls.parse_spec_dict(spec_dict)
        return cls(*init_args)
    @classmethod
    @abc.abstractmethod
    def parse_spec_dict(cls, specification: Dict) -> Tuple:
        """Parses a dict representation of the specification into init args."""
        raise NotImplementedError
    @abc.abstractmethod
    def to_dict(self) -> Dict:
        """Coerce the specification to a dict."""
        raise NotImplementedError
    def dump(self, path: Union[str, Path]) -> None:
        """Writes this specification to a yaml file."""
        self._dump(self.to_dict(), path)
    def __repr__(self):
        return f'{self.__class__.__name__}(\n{pformat(self.to_dict())}\n)'
def asdict(data_class) -> Dict:
    """Type coerce items for easy serialization."""
    def _coerce(value):
        # Tuples and numpy arrays don't serialize cleanly; emit plain lists.
        if isinstance(value, tuple):
            return list(value)
        if isinstance(value, np.ndarray):
            return value.tolist()
        return value
    return {key: _coerce(value) for key, value in asdict_(data_class).items()}
def filter_to_spec_fields(spec_dict: dict, specification):
    """Restrict *spec_dict* to the declared fields of *specification*.

    Args:
        spec_dict: Raw mapping, e.g. parsed from a yaml specification file.
        specification: A dataclass (class or instance) whose declared fields
            define the allowed keys. Non-dataclass inputs are returned
            unfiltered.

    Returns:
        A new dict containing only keys that are dataclass fields, or
        ``spec_dict`` itself when *specification* is not a dataclass.
    """
    if not is_dataclass(specification):
        return spec_dict
    # Build the allowed-key set once; the original rebuilt the field-name
    # list for every key in spec_dict (O(n*m)).
    allowed = {f.name for f in fields(specification)}
    return {k: v for k, v in spec_dict.items() if k in allowed}
def load_location_hierarchy(location_set_version_id: int = None,
                            location_file: Path = None, **kwargs):
    """Load a location hierarchy from the IHME db or from a csv file.

    Exactly one of *location_set_version_id* and *location_file* must be
    provided; supplying both or neither is a programming error.
    """
    # Exclusive-or guard: precisely one source may be given.
    assert bool(location_set_version_id) != bool(location_file)
    if location_set_version_id:
        return ihme_deps.get_location_hierarchy_by_version(
            location_set_version_id=location_set_version_id,
        )
    return pd.read_csv(location_file)
| src/covid_model_seiir_pipeline/lib/utilities.py | 3,287 | Generic class for pipeline stage specifications.
Mixin for reading and writing yaml files.
Type coerce items for easy serialization
Writes this specification to a file.
Builds the specification from a dictionary.
Builds the specification from a file path.
Parses a dict representation of the specification into init args.
Coerce the specification to a dict. | 357 | en | 0.854518 |
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):
    """
    Distance weighted sampling.
    # References
        - [sampling matters in deep embedding learning]
        (https://arxiv.org/abs/1706.07567)
    # Arguments:
        labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
            multi-class integer labels.
        embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
            be l2 normalized.
        high_var_threshold: float. cutoff for high gradient variance.
        nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. multiplier used to enlarge the number
            of negative and positive samples drawn per anchor.
    Returns:
        a_indices: indices of anchors.
        anchors: sampled anchor embeddings.
        positives: sampled positive embeddings.
        negatives: sampled negative embeddings.
    """
    if not isinstance(neg_multiplier, int):
        raise ValueError("`neg_multiplier` must be an integer.")
    n = tf.size(labels)
    if not isinstance(embeddings, tf.Tensor):
        embeddings = tf.convert_to_tensor(embeddings)
    d = embeddings.shape[1].value
    distances = pairwise_distance(embeddings, squared=False)
    # Cut distances off from below to avoid the high-variance region.
    distances = tf.maximum(distances, high_var_threshold)
    # Inverse density q(d)^-1 on the unit hypersphere, computed in log space;
    # subtract max(log weight) before exp for numerical stability.
    log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))
    weights = tf.exp(log_weights - tf.reduce_max(log_weights))
    # Sample only negative examples by setting weights of same-class pairs to 0.
    lshape = tf.shape(labels)
    assert lshape.shape == 1
    labels = tf.reshape(labels, [lshape[0], 1])
    adjacency = tf.equal(labels, tf.transpose(labels))
    adjacency_not = tf.logical_not(adjacency)
    mask = tf.cast(adjacency_not, tf.float32)
    # Number of negative/positive samples drawn per anchor. For imbalanced
    # data this sampling effectively acts as a sample-weighting scheme.
    adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))
    m = tf.reduce_sum(adjacency_ex, axis=1)
    # BUG FIX: the original guarded this patch with a Python-level
    # `if tf.reduce_min(m) == 0:`. In TF1 graph mode that compares a Tensor
    # object with 0 and is always False, so the patch never executed and
    # anchors without any positive kept an all-zero pos_weights row.
    # Applying the patch unconditionally is equivalent: tf.equal(m, 0) is
    # all-False when every class has a positive, making the added diagonal
    # a no-op; otherwise it lets a lone example pick itself as positive.
    m_fix = tf.diag(tf.cast(tf.equal(m, 0), tf.int32))
    adjacency_ex += m_fix
    k = tf.maximum(tf.reduce_max(m), 1) * neg_multiplier
    pos_weights = tf.cast(adjacency_ex, tf.float32)
    # Zero out same-class pairs and pairs beyond the non-zero-loss cutoff,
    # then row-normalize into a sampling distribution.
    weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)
    weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)
    # Anchor indices: each of the n examples serves as anchor k times.
    a_indices = tf.reshape(tf.range(n), (-1, 1))
    a_indices = tf.tile(a_indices, [1, k])
    a_indices = tf.reshape(a_indices, (-1,))
    # Negative sampling: draw k negatives per anchor from the distance weights.
    def neg_sampling(i):
        s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
        return s
    n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)
    n_indices = tf.reshape(n_indices, (-1,))
    # Positive sampling: draw k positives per anchor uniformly from its class.
    def pos_sampling(i):
        s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
        return s
    p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)
    p_indices = tf.reshape(p_indices, (-1,))
    anchors = tf.gather(embeddings, a_indices, name='gather_anchors')
    positives = tf.gather(embeddings, p_indices, name='gather_pos')
    negatives = tf.gather(embeddings, n_indices, name='gather_neg')
    return a_indices, anchors, positives, negatives
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,
                      nonzero_loss_threshold=1.4, neg_multiplier=1):
    """
    Computes the margin based loss.
    # References
        - [sampling matters in deep embedding learning]
        (https://arxiv.org/abs/1706.07567)
    Args:
        labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
        embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
        beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.
        margin: Float, margin term in the loss function.
        nu: float. Regularization parameter for beta.
        high_var_threshold: float. cutoff for high gradient variance.
        nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. multiplier used to enlarge the negative and positive samples.
    Returns:
        margin_based_loss: tf.float32 scalar
    """
    a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
                                                                      embeddings,
                                                                      high_var_threshold=high_var_threshold,
                                                                      nonzero_loss_threshold=nonzero_loss_threshold,
                                                                      neg_multiplier=neg_multiplier)
    # Scalar beta: a single global boundary, no regularization term.
    if isinstance(beta_in, (float,int)):
        beta = beta_in
        beta_reg_loss = 0.0
    else:
        if isinstance(beta_in, tf.Tensor):
            assert tf.shape(beta_in).shape == 1
            # k = number of sampled pairs per anchor; repeat each example's
            # beta k times so it lines up with the flattened pair tensors.
            k = tf.size(a_indices) / tf.size(labels)
            k = tf.cast(k, tf.int32)
            beta = tf.reshape(beta_in, (-1, 1))
            beta = tf.tile(beta, [1, k])
            beta = tf.reshape(beta, (-1,))
            # L1 regularization on the learned boundaries, weighted by nu.
            beta_reg_loss = tf.reduce_sum(beta) * nu
        else:
            raise ValueError("`beta_in` must be one of [float, int, tf.Tensor].")
    d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)
    d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)
    # Hinge on both sides of the boundary beta: a positive pair is loss-free
    # when d_ap <= beta - margin, a negative pair when d_an >= beta + margin.
    pos_loss = tf.maximum(margin + d_ap - beta, 0)
    neg_loss = tf.maximum(margin + beta - d_an, 0)
    pair_cnt = tf.cast(tf.size(a_indices), tf.float32)
    # normalize based on the number of pairs
    loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt
    return loss
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,
                                  nonzero_loss_threshold=1.4, neg_multiplier=1):
    """distance weighted sampling + triplet loss
    Args:
        labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
        embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
        margin: Float, margin term in the loss function.
        squared: Boolean, whether or not to square the triplet distances.
        high_var_threshold: float. cutoff for high gradient variance.
        nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. multiplier used to enlarge the negative and positive samples.
    Returns:
        triplet_loss: tf.float32 scalar
    """
    a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
                                                                      embeddings,
                                                                      high_var_threshold=high_var_threshold,
                                                                      nonzero_loss_threshold=nonzero_loss_threshold,
                                                                      neg_multiplier=neg_multiplier)
    d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)
    d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)
    if not squared:
        # BUG FIX: original called K.sqrt, but the Keras backend `K` is never
        # imported in this module, raising NameError whenever squared=False
        # (the default). tf.sqrt is the intended equivalent.
        d_ap = tf.sqrt(d_ap + 1e-16)
        d_an = tf.sqrt(d_an + 1e-16)
    loss = tf.maximum(d_ap - d_an + margin, 0)
    loss = tf.reduce_mean(loss)
    return loss
| loss.py | 8,087 | Distance weighted sampling.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
# Arguments:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.
Returns:
a_indices: indices of anchors.
anchors: sampled anchor embeddings.
positives: sampled positive embeddings.
negatives: sampled negative embeddings.
distance weighted sampling + triplet loss
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
margin: Float, margin term in the loss function.
squared: Boolean, whether or not to square the triplet distances.
nu: float. Regularization parameter for beta.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.
Returns:
triplet_loss: tf.float32 scalar
Computes the margin base loss.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.
margin: Float, margin term in the loss function.
nu: float. Regularization parameter for beta.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.
Returns:
margin_based_Loss: tf.float32 scalar
cut off to void high variance. subtract max(log(distance)) for stability sample only negative examples by setting weights of the same class examples to 0. number of negative/positive samples to sampling per sample. For imbalanced data, this sampling method can be a sample weighted method. anchors indices negative sampling postive samping normalize based on the number of pairs | 2,621 | en | 0.659532 |
import tensorflow as tf
import numpy as np
class WindowGenerator():
    """Slices consecutive rows of time-series DataFrames into
    (inputs, labels) windows and exposes them as `tf.data.Dataset`s.

    A window spans `input_width + shift` consecutive time steps: the
    first `input_width` steps are model inputs and the final
    `label_width` steps are labels. If `label_columns` is given, labels
    are restricted to those columns; otherwise every column is a label.
    """
    def __init__(self, input_width, label_width, shift,
                 train_df, val_df, test_df, label_columns=None):
        # Store the raw data.
        self.train_df = train_df
        self.val_df = val_df
        self.test_df = test_df
        # Work out the label column indices.
        # NOTE(review): `label_columns_indices` is only defined when
        # `label_columns` is given -- presumably nothing reads it otherwise.
        self.label_columns = label_columns
        if label_columns is not None:
            self.label_columns_indices = {name: i for i, name in
                                          enumerate(label_columns)}
        self.column_indices = {name: i for i, name in
                               enumerate(train_df.columns)}
        # Work out the window parameters.
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        # Inputs occupy the first `input_width` steps of each window.
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        # Labels occupy the last `label_width` steps of each window.
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
    def split_window(self, features):
        """Split a batch of full windows into (inputs, labels) tensors."""
        inputs = features[:, self.input_slice, :]
        labels = features[:, self.labels_slice, :]
        if self.label_columns is not None:
            labels = tf.stack(
                [labels[:, :, self.column_indices[name]] for name in self.label_columns],
                axis=-1)
        # Slicing doesn't preserve static shape information, so set the shapes
        # manually. This way the `tf.data.Datasets` are easier to inspect.
        inputs.set_shape([None, self.input_width, None])
        labels.set_shape([None, self.label_width, None])
        return inputs, labels
    def make_dataset(self, data):
        """Build a shuffled, batched dataset of (inputs, labels) windows."""
        data = np.array(data, dtype=np.float32)
        ds = tf.keras.preprocessing.timeseries_dataset_from_array(
            data=data,
            targets=None,
            sequence_length=self.total_window_size,
            sequence_stride=1,
            shuffle=True,
            batch_size=32,)
        ds = ds.map(self.split_window)
        return ds
    @property
    def train(self):
        return self.make_dataset(self.train_df)
    @property
    def val(self):
        return self.make_dataset(self.val_df)
    @property
    def test(self):
        return self.make_dataset(self.test_df)
    @property
    def example(self):
        """Get and cache an example batch of `inputs, labels` for plotting."""
        result = getattr(self, '_example', None)
        if result is None:
            # No example batch was found, so get one from the `.train` dataset
            result = next(iter(self.train))
            # And cache it for next time
            self._example = result
        return result
    def __repr__(self):
        return '\n'.join([
            f'Total window size: {self.total_window_size}',
            f'Input indices: {self.input_indices}',
            f'Label indices: {self.label_indices}',
            f'Label column name(s): {self.label_columns}'])
| src/data_cleaning/window_generator.py | 2,919 | Get and cache an example batch of `inputs, labels` for plotting.
Store the raw data. Work out the label column indices. Work out the window parameters. Slicing doesn't preserve static shape information, so set the shapes manually. This way the `tf.data.Datasets` are easier to inspect. No example batch was found, so get one from the `.train` dataset And cache it for next time | 379 | en | 0.826151 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-10 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.2: sets default ordering by name and
    # declares the full set of custom object-level permissions on Study.
    dependencies = [
        ('studies', '0015_auto_20170707_1820'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='study',
            options={'ordering': ['name'], 'permissions': (('can_view_study', 'Can View Study'), ('can_create_study', 'Can Create Study'), ('can_edit_study', 'Can Edit Study'), ('can_remove_study', 'Can Remove Study'), ('can_activate_study', 'Can Activate Study'), ('can_deactivate_study', 'Can Deactivate Study'), ('can_pause_study', 'Can Pause Study'), ('can_resume_study', 'Can Resume Study'), ('can_approve_study', 'Can Approve Study'), ('can_submit_study', 'Can Submit Study'), ('can_retract_study', 'Can Retract Study'), ('can_resubmit_study', 'Can Resubmit Study'), ('can_edit_study_permissions', 'Can Edit Study Permissions'), ('can_view_study_permissions', 'Can View Study Permissions'), ('can_view_study_responses', 'Can View Study Responses'), ('can_view_study_video_responses', 'Can View Study Video Responses'), ('can_view_study_demographics', 'Can View Study Demographics'))},
        ),
    ]
| studies/migrations/0016_auto_20170710_1438.py | 1,256 | -*- coding: utf-8 -*- Generated by Django 1.11.2 on 2017-07-10 14:38 | 68 | en | 0.675996 |
from src.analysis.QQZoneAnalysis import QQZoneAnalysis
import json
from src.util.constant import BASE_DIR
from src.util.util import get_mktime2
import pandas as pd
import re
from src.analysis.SentimentClassify import SentimentClassify
class TrainMood(QQZoneAnalysis):
    """
    Generate the various datasets needed for training: image aesthetic
    scores, send-time buckets, text sentiment, text-classification
    exports, detected image objects, and the final merged training table.
    """
    def __init__(self, use_redis=False, debug=True, file_name_head=''):
        QQZoneAnalysis.__init__(self, use_redis=use_redis, debug=debug, username=file_name_head, analysis_friend=False)
        TRAIN_BASE_DIR = BASE_DIR + file_name_head + '/data/train/'
        # Intermediate/output csv paths, one per pipeline stage.
        self.MOOD_DATA_SCORE_FILE_NAME = TRAIN_BASE_DIR + 'score_mood_data.csv'
        self.RE_DO_SENTIMENT_FILE_NAME = TRAIN_BASE_DIR + 're_do_mood_data.csv'
        self.TEXT_LABEL_TRAIN_DATA = TRAIN_BASE_DIR + 'mood_text.csv'
        self.TRAIN_DATA_AFTER_CLASSIFIC = TRAIN_BASE_DIR + 'mood_classific.csv'
        self.TEXT_LABEL_RESULT_TRAIN_DATA = '../data/train3/text_' + file_name_head + '_label.csv'
        self.TEXT_CLASSIFICATION_DATA_SET = '../data/train/'
        self.FINAL_RESULT_TRAIN_DATA = '../data/train/' + file_name_head + '_final_train.csv'
        self.mood_data_df = pd.read_csv(self.MOOD_DATA_FILE_NAME)
        self.IMAGE_OBJECT_FILE_NAME = '../data/train3/' + file_name_head + '_image_object.csv'
        self.MOOD_DATA_AFTER_OBJECT = '../data/train/' + file_name_head + '_after_object.csv'
        self.sc = SentimentClassify()
        # NOTE(review): initialized with the STRING '-1' while later stages
        # assign/compare numeric scores -- confirm the intended dtype.
        self.mood_data_df['score'] = '-1'
        # Label id -> Chinese category name. Translations: 1 travel & sports,
        # 2 love & family, 3 study & work, 4 advertisement, 5 daily life,
        # 6 other, 7 life reflections.
        self.label_dict = {'1': '旅游与运动',
                           '2': '爱情与家庭',
                           '3': '学习与工作',
                           '4': '广告',
                           '5': '生活日常',
                           '6': '其他',
                           '7': '人生感悟'}
        self.label_dict_reverse = {v: k for k, v in self.label_dict.items()}
    def calculate_score_for_each_mood(self):
        """
        Score images with Google's NIMA aesthetic model and attach the mean
        image score to each mood post.
        paper: https://arxiv.org/abs/1709.05424
        pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
        Posts without images keep the overall mean score as a fill value.
        :return:
        """
        # Prediction output file produced by the NIMA model run.
        self.IMAGE_SCORE_FILE_PATH = '/Users/maicius/code/nima.pytorch/nima/result_dict.json'
        with open(self.IMAGE_SCORE_FILE_PATH, 'r', encoding='utf-8') as r:
            self.image_score_dict = json.load(r)
        self.image_score_df = pd.DataFrame(self.image_score_dict)
        # -1 marks images the model could not score; replace with the mean.
        mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
        self.image_score_df.loc[self.image_score_df.score == -1, 'score'] = mean_score
        tid_list = self.mood_data_df['tid'].values
        for tid in tid_list:
            # Image file names embed the tid of the post they belong to.
            scores = self.image_score_df[self.image_score_df.image.str.contains(tid)].score
            if len(scores) > 0:
                self.mood_data_df.loc[self.mood_data_df.tid == tid, 'score'] = round(scores.mean(), 2)
        # NOTE(review): fillna() is not in-place and its result is discarded
        # here -- likely missing inplace=True.
        self.mood_data_df.fillna(mean_score)
        print("score shape:", self.mood_data_df.shape)
        self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
    def calculate_send_time(self):
        """
        Bucket each post's send time into one of six 4-hour slots:
        0. midnight: 00-04
        1. early morning: 04-08
        2. morning: 08-12
        3. afternoon: 12-16
        4. evening: 16-20
        5. night: 20-24
        :return:
        """
        # Seconds since the start of the post's day.
        day_begin_time = self.mood_data_df['time'].apply(lambda x: get_mktime2(x))
        day_time_stamp = self.mood_data_df['time_stamp']
        time_diff = day_time_stamp - day_begin_time
        # Four-hour bucket width, in seconds.
        time_step = 60 * 60 * 4
        time_state = time_diff.apply(lambda x: x // time_step)
        self.mood_data_df['time_state'] = time_state
        print('send time:', self.mood_data_df.shape)
    def export_df_after_clean(self):
        """Drop the stray csv index column (if any) and persist the frame."""
        try:
            self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
        except BaseException as e:
            print(e)
        self.mood_data_df.to_csv(self.MOOD_DATA_SCORE_FILE_NAME)
    def export_train_text(self):
        """Export labeled text as train/val/test csv files for the text
        classifier. Labels are mapped to their Chinese category names and
        content is stripped of newlines, spaces and QZone emoji markup.
        """
        train_text = pd.read_csv(self.label_path + 'result/' + 'final.csv')
        train_text = train_text[['type', 'content']]
        train_text.columns = ['Y', 'content']
        train_text.fillna('空', inplace=True)
        train_text.Y = train_text.Y.apply(lambda x: self.label_dict[str(int(x))])
        train_text.content = train_text.content.apply(lambda x: str(x).replace('\n', ''))
        train_text.content = train_text.content.apply(lambda x: str(x).replace(' ', ''))
        train_text.content = train_text.content.apply(lambda x: remove_waste_emoji(x))
        train_text.fillna('空', inplace=True)
        # NOTE(review): the three splits are drawn independently with
        # sample(), so they overlap rather than partition the data.
        train_dataset = train_text.sample(frac=0.8)
        val_dataset = train_text.sample(frac=0.3)
        test_dataset = train_text.sample(frac=0.3)
        self.print_label_dict(train_text)
        self.print_label_dict(train_dataset)
        self.print_label_dict(val_dataset)
        self.print_label_dict(test_dataset)
        train_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_train.csv', sep='\t', index=None, header=None)
        val_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_val.csv', sep='\t', index=None, header=None)
        test_dataset.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_test.csv', sep='\t', index=None, header=None)
        self.calculate_avg_length(train_text)
        # train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None)
    def calculate_avg_length(self, data_df):
        """Print the mean content length (sum() concatenates all strings)."""
        num = data_df.shape[0]
        content_list = data_df.content.sum()
        print(len(content_list) / num)
    def calculate_sentiment(self):
        """Attach a sentiment score to every post via the sentiment API,
        then retry failed rows and persist the result."""
        print("Begin to calculate sentiment...")
        self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace('\n', ''))
        self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: str(x).replace(' ', ''))
        self.mood_data_df.content = self.mood_data_df.content.apply(lambda x: remove_waste_emoji(str(x)))
        # Using DataFrame.apply would exceed the sentiment API's QPS quota,
        # so rows are scored one at a time instead:
        # sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x))
        # self.mood_data_df['sentiment'] = sentiments
        self.mood_data_df['sentiments'] = -1
        for i in range(self.mood_data_df.shape[0]):
            content = self.mood_data_df.loc[i, 'content']
            sentiment = self.sc.get_sentiment_for_text(content)
            print('content:', content, 'senti:', sentiment)
            self.mood_data_df.loc[i, 'sentiments'] = sentiment
        self.mood_data_df = self.re_do_sentiment(self.mood_data_df)
        try:
            self.mood_data_df.drop(['Unnamed: 0'], axis=1, inplace=True)
        except BaseException as e:
            print(e)
        self.mood_data_df.to_csv('after_sentiment.csv')
        print("text sentiment:", self.mood_data_df.shape)
    def print_label_dict(self, data_df):
        """Print the per-category row count of *data_df*."""
        for item in self.label_dict.values():
            print(item, data_df.loc[data_df.Y == item, :].shape[0])
        print('==========')
    def re_do_sentiment(self, data_df):
        """Retry sentiment scoring for rows that previously failed (-1),
        after stripping private-use characters the API rejects."""
        # data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
        for i in range(data_df.shape[0]):
            sentiment = data_df.loc[i, 'sentiments']
            content = data_df.loc[i, 'content']
            if sentiment == -1:
                # Remove characters known to break the sentiment API call.
                content = content.replace('\u2207', '')
                content = content.replace('\ue40c', '')
                content = content.replace('\ue412', '')
                content = content.replace('\ue056', '')
                sentiment = self.sc.get_sentiment_for_text(str(content))
                data_df.loc[i, 'sentiments'] = sentiment
        data_df.to_csv(self.RE_DO_SENTIMENT_FILE_NAME)
        return data_df
    def export_classification_data(self):
        """
        Export the content that still needs manual labeling, pre-filled
        with a placeholder category.
        :return:
        """
        data = pd.read_csv(self.RE_DO_SENTIMENT_FILE_NAME)
        data_df = data[['content']]
        data_df['Y'] = '旅游与运动'
        data_df.fillna('空', inplace=True)
        columns = ['Y', 'content']
        # NOTE(review): DataFrame.ix was removed in modern pandas; this line
        # requires an old pandas version (use .loc for label-based selection).
        data_df = data_df.ix[:, columns]
        print(data_df.shape)
        data_df.to_csv(self.TEXT_CLASSIFICATION_DATA_SET + 'text_maicius.csv', sep='\t')
    def combine_text_type_data(self):
        """Join the predicted text labels onto the scored mood data by
        row position (assumes both files share the same row order)."""
        data = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME)
        print('mood_after_object_data:', data.shape)
        label = pd.read_csv(self.TEXT_LABEL_RESULT_TRAIN_DATA)
        print('label data:', label.shape)
        label_y = label['Y']
        data['type'] = label_y
        data.to_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
    def attach_image_object_for_each_mood(self):
        """Turn per-image detected objects (qq_big_image.json) into a
        per-post one-hot supercategory vector using the COCO-style
        category mapping in category.json."""
        with open('qq_big_image.json', 'r', encoding='utf-8') as r:
            data = json.load(r)
        with open('category.json', 'r', encoding='utf-8') as r:
            category = json.load(r)
        category_df = pd.DataFrame(category)
        image_object_df = pd.DataFrame(
            columns=['tid', 'person', 'vehicle', 'outdoor', 'animal', 'accessory', 'sports', 'kitchen', 'food',
                     'furniture',
                     'electronic', 'appliance', 'indoor'])
        i = 0
        for key, value in data.items():
            # Image keys look like '<path>/<tid>--<suffix>'; recover the tid.
            tid = key.split('--')[0].split('/')[-1]
            if image_object_df.loc[image_object_df.tid == tid].shape[0] == 0:
                image_object_df.loc[i, 'tid'] = tid
                i +=1
            for item in value:
                # Each detection is '<name> <confidence>'; keep the name only.
                item = item.split(' ')[0]
                super_cate = category_df.loc[category_df.name.str.contains(item), 'supercategory']
                if len(super_cate) > 0:
                    print(super_cate)
                    image_object_df.loc[image_object_df.tid == tid, super_cate.values[0]] = 1
        image_object_df.fillna(0, inplace=True)
        image_object_df['vector'] = 0
        # Collapse the one-hot columns into a single array-valued column.
        image_object_df['vector'] = image_object_df['tid'].apply(lambda x: image_object_df.loc[image_object_df.tid == x,'person':].values[0])
        image_object_df.to_csv(self.IMAGE_OBJECT_FILE_NAME)
    def combine_image_object(self):
        """Left-join the per-post image-object vectors onto the classified
        mood data by tid."""
        image_object_df = pd.read_csv(self.IMAGE_OBJECT_FILE_NAME)
        mood_data_df = pd.read_csv(self.TRAIN_DATA_AFTER_CLASSIFIC)
        try:
            mood_data_df.drop(['vector'], axis=1, inplace=True)
        except BaseException as e:
            print(e)
        image_object = image_object_df[['tid', 'vector']]
        print(image_object_df.shape, mood_data_df.shape)
        result = pd.merge(mood_data_df, image_object, on='tid', how='left')
        print(result.shape)
        result.to_csv(self.MOOD_DATA_AFTER_OBJECT)
    def export_final_train_data(self):
        """Assemble the final training table: likes, image score, time
        bucket, sentiment, text label id and image-object code.
        NOTE(review): depends on self.image_score_df, which only exists
        after calculate_score_for_each_mood() has run in this session.
        """
        data = pd.read_csv(self.MOOD_DATA_AFTER_OBJECT)
        train = data[['n_E', 'score', 'time_state', 'sentiments', 'type', 'vector']]
        train = train.loc[6:, :]
        self.mean_score = self.image_score_df[self.image_score_df['score'] != -1].mean()[0]
        train.score = train['score'].apply(lambda x: self.change_neg_image_score(x))
        train.type = train['type'].map(self.label_dict_reverse)
        # Posts without images get an all-zero object vector.
        train.vector.fillna('[0 0 0 0 0 0 0 0 0 0 0 0 0]', inplace=True)
        train.vector = train.vector.apply(lambda x: self.change_vector_to_int(x))
        train.sort_values(by='n_E', inplace=True, ascending=False)
        train.to_csv(self.FINAL_RESULT_TRAIN_DATA)
    def change_neg_image_score(self, score):
        """Replace the -1 'unscored' sentinel with the mean image score."""
        if score == -1:
            return self.mean_score
        else:
            return score
    def change_vector_to_int(self, vector):
        """Encode the stringified 0/1 object vector as a single integer.
        NOTE(review): the exponent is the constant (length - 1) for every
        digit -- a positional encoding would use (length - 1 - i); confirm
        whether this is intentional.
        """
        vector = re.findall(re.compile('[0-9]'), vector)
        str_vector = "".join(vector)
        sum = 0
        length = len(str_vector)
        for i in range(length):
            sum += int(str_vector[i]) **(length - 1)
        return sum
def remove_waste_emoji(text):
    """Strip QZone markup from *text*: [em]...[/em] emoji tags and
    @{...} user-mention payloads."""
    without_emoji = re.sub(re.compile('\[em\].*?\[\/em\]'), '', text)
    return re.sub(re.compile('@\{.*?\}'), '', without_emoji)
if __name__ == '__main__':
    # Manual driver: run pipeline stages one at a time for user 'maicius'
    # (earlier stages write the csv inputs of later ones). Only the text
    # export is currently enabled; uncomment stages in order as needed.
    train = TrainMood(use_redis=True, debug=True, file_name_head='maicius')
    # train.calculate_score_for_each_mood()
    # train.calculate_send_time()
    # train.calculate_sentiment()
    # train.export_df_after_clean()
    train.export_train_text()
    # train.export_classification_data()
    # train.attach_image_object_for_each_mood()
    # train.combine_text_type_data()
    # train.combine_image_object()
    # train.export_final_train_data()
| src/analysis/TrainMood.py | 12,717 | 生成各种训练需要的数据集
利用谷歌nima模型对图片进行评分
paper: https://arxiv.org/abs/1709.05424
pytorch model: https://github.com/truskovskiyk/nima.pytorch.git
计算每条说说中图片的平均分
对于没有图片的按均值进行填充
:return:
计算每条说说的发送时间
分为以下五种类型:
0.午夜:0点-4点
1.凌晨:4点-8点
2.上午:8点-12点
3.下午:12点-16点
4.傍晚:16点-20点
5.晚上:20点-24点
:return:
导出待分类待的数据
:return:
nima模型预测结果文件 四个小时的时间差 train_text.to_csv(self.TEXT_LABEL_TRAIN_DATA, sep=' ', index=None, header=None) 使用apply会导致超过qps限额 sentiments = self.mood_data_df['content'].apply(lambda x: self.sc.get_sentiment_for_text(x)) self.mood_data_df['sentiment'] = sentiments data_df = pd.read_csv(self.MOOD_DATA_SCORE_FILE_NAME) train.calculate_score_for_each_mood() train.calculate_send_time() train.calculate_sentiment() train.export_df_after_clean() train.export_classification_data() train.attach_image_object_for_each_mood() train.combine_text_type_data() train.combine_image_object() train.export_final_train_data() | 902 | zh | 0.431836 |
from __future__ import absolute_import, unicode_literals
from django import forms
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.classes import ModelProperty
from mayan.apps.common.forms import FilteredSelectionForm
from mayan.apps.documents.models import Document
from .models import Index, IndexTemplateNode
from .permissions import permission_document_indexing_rebuild
class IndexTemplateFilteredForm(FilteredSelectionForm):
    """Selection form listing the enabled index templates a user may
    queue for rebuilding, filtered by the rebuild permission."""

    class Meta:
        allow_multiple = True
        field_name = 'index_templates'
        help_text = _('Index templates to be queued for rebuilding.')
        label = _('Index templates')
        permission = permission_document_indexing_rebuild
        queryset = Index.objects.filter(enabled=True)
        widget_attributes = {'class': 'select2'}
class IndexTemplateNodeForm(forms.ModelForm):
    """
    A standard model form to allow users to create a new index template node
    """
    def __init__(self, *args, **kwargs):
        super(IndexTemplateNodeForm, self).__init__(*args, **kwargs)
        # The index and parent are fixed by the view; hide them from users.
        for hidden_field in ('index', 'parent'):
            self.fields[hidden_field].widget = forms.widgets.HiddenInput()
        # Append the list of document properties usable in expressions to
        # the expression field's help text.
        expression_field = self.fields['expression']
        property_help = ModelProperty.get_help_text_for(
            model=Document, show_name=True
        ).replace('\n', '<br>')
        expression_field.help_text = ' '.join(
            [force_text(expression_field.help_text), '<br>', property_help]
        )

    class Meta:
        fields = ('parent', 'index', 'expression', 'enabled', 'link_documents')
        model = IndexTemplateNode
| mayan/apps/document_indexing/forms.py | 1,701 | A standard model form to allow users to create a new index template node | 72 | en | 0.51357 |
import io
import json
import os
from django.conf import settings
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class DocumentationMenu(CMSAttachMenu):
    name = _("Documentation Menu")  # Give the menu a name; this is required.

    def get_nodes(self, request):
        """
        Build the menu tree from the Sphinx docsmap.json manifest.

        Returns one NavigationNode per chapter index page; returns an
        empty list when the docsmap file is absent.
        """
        nodes = []
        docsmap_file = os.path.join(settings.SPHINX_DOCS_ROOT, 'docsmap.json')
        if not os.path.exists(docsmap_file):
            return nodes
        # Pass the encoding to open() -- json.load() dropped its unused
        # `encoding` keyword in Python 3.9 and raises TypeError for it.
        with io.open(docsmap_file, encoding='utf-8') as fh:
            docs_map = json.load(fh)
        for counter, items in enumerate(docs_map.items(), 1):
            bits = items[0].split('/')
            # Keep only 'chapter/index' entries: skip the root index page
            # and any non-index page inside a chapter.
            if len(bits) == 1 and bits[0] == 'index' or len(bits) == 2 and bits[1] != 'index':
                continue
            node = NavigationNode(
                title=items[1],
                url=reverse_lazy('sphinx-documentation', args=(bits[0],)),
                id=counter,
            )
            nodes.append(node)
        return nodes
# Make the menu discoverable by django CMS.
menu_pool.register_menu(DocumentationMenu)
| cmsplugin_cascade/sphinx/cms_menus.py | 1,271 | This method is used to build the menu tree.
give the menu a name this is required. | 84 | en | 0.93677 |
"""Constants for Airly integration."""
from __future__ import annotations
from typing import Final
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
DEVICE_CLASS_AQI,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PM1,
DEVICE_CLASS_PM10,
DEVICE_CLASS_PM25,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from .model import AirlySensorEntityDescription
# Raw keys as returned by the Airly API.
ATTR_API_ADVICE: Final = "ADVICE"
ATTR_API_CAQI: Final = "CAQI"
ATTR_API_CAQI_DESCRIPTION: Final = "DESCRIPTION"
ATTR_API_CAQI_LEVEL: Final = "LEVEL"
ATTR_API_HUMIDITY: Final = "HUMIDITY"
ATTR_API_PM10: Final = "PM10"
ATTR_API_PM1: Final = "PM1"
ATTR_API_PM25: Final = "PM25"
ATTR_API_PRESSURE: Final = "PRESSURE"
ATTR_API_TEMPERATURE: Final = "TEMPERATURE"

# Keys used for entity extra state attributes.
ATTR_ADVICE: Final = "advice"
ATTR_DESCRIPTION: Final = "description"
ATTR_LEVEL: Final = "level"
ATTR_LIMIT: Final = "limit"
ATTR_PERCENT: Final = "percent"

# Suffixes combined with API keys for derived values.
SUFFIX_PERCENT: Final = "PERCENT"
SUFFIX_LIMIT: Final = "LIMIT"

ATTRIBUTION: Final = "Data provided by Airly"
CONF_USE_NEAREST: Final = "use_nearest"
DEFAULT_NAME: Final = "Airly"
DOMAIN: Final = "airly"
LABEL_ADVICE: Final = "advice"
MANUFACTURER: Final = "Airly sp. z o.o."
# Update interval bounds -- units not shown in this module (presumably
# minutes); confirm against the integration's update coordinator.
MAX_UPDATE_INTERVAL: Final = 90
MIN_UPDATE_INTERVAL: Final = 5
NO_AIRLY_SENSORS: Final = "There are no Airly sensors in this area yet."
# Descriptions of every sensor entity exposed by the integration; the
# optional `value` callable post-processes the raw API reading.
SENSOR_TYPES: tuple[AirlySensorEntityDescription, ...] = (
    AirlySensorEntityDescription(
        key=ATTR_API_CAQI,
        device_class=DEVICE_CLASS_AQI,
        name=ATTR_API_CAQI,
        native_unit_of_measurement="CAQI",
    ),
    AirlySensorEntityDescription(
        key=ATTR_API_PM1,
        device_class=DEVICE_CLASS_PM1,
        name=ATTR_API_PM1,
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    AirlySensorEntityDescription(
        key=ATTR_API_PM25,
        device_class=DEVICE_CLASS_PM25,
        name="PM2.5",
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    AirlySensorEntityDescription(
        key=ATTR_API_PM10,
        device_class=DEVICE_CLASS_PM10,
        name=ATTR_API_PM10,
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    AirlySensorEntityDescription(
        key=ATTR_API_HUMIDITY,
        device_class=DEVICE_CLASS_HUMIDITY,
        name=ATTR_API_HUMIDITY.capitalize(),
        native_unit_of_measurement=PERCENTAGE,
        state_class=STATE_CLASS_MEASUREMENT,
        value=lambda value: round(value, 1),
    ),
    AirlySensorEntityDescription(
        key=ATTR_API_PRESSURE,
        device_class=DEVICE_CLASS_PRESSURE,
        name=ATTR_API_PRESSURE.capitalize(),
        native_unit_of_measurement=PRESSURE_HPA,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    AirlySensorEntityDescription(
        key=ATTR_API_TEMPERATURE,
        device_class=DEVICE_CLASS_TEMPERATURE,
        name=ATTR_API_TEMPERATURE.capitalize(),
        native_unit_of_measurement=TEMP_CELSIUS,
        state_class=STATE_CLASS_MEASUREMENT,
        value=lambda value: round(value, 1),
    ),
)
| homeassistant/components/airly/const.py | 3,325 | Constants for Airly integration. | 32 | en | 0.853098 |
#!/usr/bin/env python3
# Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Ignore indention messages, since legacy scripts use 2 spaces instead of 4.
# pylint: disable=bad-indentation,docstring-section-indent
# pylint: disable=docstring-trailing-quotes
# A script to pack EC binary into SPI flash image for MEC17xx
# Based on MEC170x_ROM_Description.pdf DS00002225C (07-28-17).
import argparse
import hashlib
import os
import struct
import subprocess
import tempfile
import zlib # CRC32
# MEC1701 has 256KB SRAM from 0xE0000 - 0x120000
# SRAM is divided into contiguous CODE & DATA
# CODE at [0xE0000, 0x117FFF] DATA at [0x118000, 0x11FFFF]
# SPI flash size for board is 512KB
# Boot-ROM TAG is located at SPI offset 0 (two 4-byte tags)
#
# Loader (LFW) image size: 4KB.
LFW_SIZE = 0x1000
# SRAM load address for LFW + EC_RO.
LOAD_ADDR = 0x0E0000
# SRAM load address for EC_RW (immediately past the 4KB LFW).
LOAD_ADDR_RW = 0xE1000
# Boot-ROM header size in bytes (signature not included).
HEADER_SIZE = 0x40
# SPI clock speeds (MHz) accepted by the header; list index is the encoding.
SPI_CLOCK_LIST = [48, 24, 16, 12]
# SPI flash read opcodes accepted by the header; list index is the encoding.
SPI_READ_CMD_LIST = [0x3, 0xb, 0x3b, 0x6b]
# Nibble-wide CRC8 lookup table: one entry per 4-bit input value.
CRC_TABLE = [0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
             0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d]

def mock_print(*args, **kwargs):
    """No-op stand-in for print() used when verbose output is disabled."""
    pass

debug_print = mock_print

def Crc8(crc, data):
    """Update a CRC8 value over data, processing one nibble at a time."""
    for byte in data:
        crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (byte >> 4)])
        crc = ((crc << 4) & 0xff) ^ (CRC_TABLE[(crc >> 4) ^ (byte & 0xf)])
    return crc ^ 0x55
def GetEntryPoint(payload_file):
    """Read the 32-bit little-endian entry point at offset 4 of the image."""
    with open(payload_file, 'rb') as image:
        head = image.read(8)
    return struct.unpack_from('<I', head, 4)[0]
def GetPayloadFromOffset(payload_file, offset):
    """Read the file from `offset` onward and zero-pad to a 64-byte boundary."""
    with open(payload_file, 'rb') as src:
        src.seek(offset)
        payload = bytearray(src.read())
    pad = (-len(payload)) % 64
    if pad:
        payload.extend(b'\0' * pad)
    return payload
def GetPayload(payload_file):
    """Read the whole payload file and zero-pad it to a 64-byte boundary."""
    return GetPayloadFromOffset(payload_file, 0)
def GetPublicKey(pem_file):
    """Extract public exponent and modulus from PEM file.

    Shells out to `openssl rsa -in <pem_file> -text -noout` and parses
    the textual dump.

    Returns:
        (exponent, modulus): the exponent packed as 8 little-endian
        bytes, and the modulus as a little-endian bytearray truncated
        to 256 bytes (2048-bit).

    NOTE(review): `exp` is only assigned when a 'publicExponent' line
    appears in the openssl output; a malformed PEM would raise
    UnboundLocalError at the final pack -- confirm inputs are always
    valid RSA keys.
    """
    result = subprocess.run(['openssl', 'rsa', '-in', pem_file, '-text',
                             '-noout'], stdout=subprocess.PIPE, encoding='utf-8')
    modulus_raw = []
    in_modulus = False
    for line in result.stdout.splitlines():
        if line.startswith('modulus'):
            in_modulus = True
        elif not line.startswith(' '):
            in_modulus = False
        elif in_modulus:
            # Hex octets are listed as 'aa:bb:cc:...' on indented lines.
            modulus_raw.extend(line.strip().strip(':').split(':'))
        if line.startswith('publicExponent'):
            exp = int(line.split(' ')[1], 10)
    # openssl prints big-endian; reverse into little-endian byte order.
    modulus_raw.reverse()
    modulus = bytearray((int(x, 16) for x in modulus_raw[:256]))
    return struct.pack('<Q', exp), modulus
def GetSpiClockParameter(args):
    """Encode args.spi_clock (MHz) as its index in SPI_CLOCK_LIST."""
    clock_mhz = args.spi_clock
    assert clock_mhz in SPI_CLOCK_LIST, \
        "Unsupported SPI clock speed %d MHz" % clock_mhz
    return SPI_CLOCK_LIST.index(clock_mhz)
def GetSpiReadCmdParameter(args):
    """Encode args.spi_read_cmd as its index in SPI_READ_CMD_LIST."""
    read_cmd = args.spi_read_cmd
    assert read_cmd in SPI_READ_CMD_LIST, \
        "Unsupported SPI read command 0x%x" % read_cmd
    return SPI_READ_CMD_LIST.index(read_cmd)
def PadZeroTo(data, size):
    """Extend bytearray `data` in place with zero bytes up to `size`."""
    shortfall = size - len(data)
    data.extend(b'\0' * shortfall)
def BuildHeader(args, payload_len, load_addr, rorofile):
    """Build the 0x40-byte Boot-ROM SPI header for the LFW+RO payload.

    Identical layout to BuildHeader2 (which this delegates to, removing
    the previous copy-pasted body); the only difference is that the
    entry point is read out of the payload image file rather than being
    passed in by the caller.

    Args:
        args: parsed command-line arguments.
        payload_len: payload length in bytes.
        load_addr: SRAM address the Boot-ROM loads the payload to.
        rorofile: path of the packed LFW+RO image to read the entry
            point from.

    Returns:
        bytearray of HEADER_SIZE bytes; the signature is appended by
        the caller.
    """
    return BuildHeader2(args, payload_len, load_addr, GetEntryPoint(rorofile))
def BuildHeader2(args, payload_len, load_addr, payload_entry):
    """Build the 0x40-byte Boot-ROM SPI header.

    Args:
        args: parsed command-line arguments (SPI clock/read-cmd encodings
            and payload offset come from here).
        payload_len: payload length in bytes (stored in 64-byte units).
        load_addr: SRAM address the Boot-ROM loads the payload to.
        payload_entry: entry point address within the loaded payload.

    Returns:
        bytearray of HEADER_SIZE (0x40) bytes; the 64-byte signature is
        appended by the caller.
    """
    # Identifier and header version
    header = bytearray(b'PHCM\0')
    # byte[5]: SPI clock encoding; bit 2 is always set (meaning defined
    # by the Boot-ROM spec -- not documented in this script).
    b = GetSpiClockParameter(args)
    b |= (1 << 2)
    header.append(b)
    # byte[6]: reserved, zero.
    b = 0
    header.append(b)
    # byte[7]: SPI read command encoding.
    header.append(GetSpiReadCmdParameter(args))
    # bytes 0x08 - 0x0b: load address, little-endian.
    header.extend(struct.pack('<I', load_addr))
    # bytes 0x0c - 0x0f: entry point, little-endian.
    header.extend(struct.pack('<I', payload_entry))
    # bytes 0x10 - 0x11: payload length in 64-byte units, little-endian.
    header.append((payload_len >> 6) & 0xff)
    header.append((payload_len >> 14) & 0xff)
    PadZeroTo(header, 0x14)
    # bytes 0x14 - 0x17: offset of payload from the start of the header.
    header.extend(struct.pack('<I', args.payload_offset))
    # bytes 0x18 - 0x3F: all zero.
    PadZeroTo(header, 0x40)
    # header signature is appended by the caller
    return header
#
# Compute SHA-256 of data and return digest
# as a bytearray
#
def HashByteArray(data):
    """Return the SHA-256 digest of `data` as a bytearray."""
    return bytearray(hashlib.sha256(data).digest())
#
# Return 64-byte signature of byte array data.
# Signature is SHA256 of data with 32 0 bytes appended
#
def SignByteArray(data):
    """Return a 64-byte 'signature': SHA-256 of data plus 32 zero bytes."""
    debug_print("Signature is SHA-256 of data")
    signature = HashByteArray(data)
    signature.extend(bytearray(32))
    return signature
# MEC1701H supports two 32-bit Tags located at offsets 0x0 and 0x4
# in the SPI flash.
# Tag format:
# bits[23:0] correspond to bits[31:8] of the Header SPI address
# Header is always on a 256-byte boundary.
# bits[31:24] = CRC8-ITU of bits[23:0].
# Notice there is no chip-select field in the Tag both Tag's point
# to the same flash part.
#
def BuildTag(args):
    """Build the 4-byte Boot-ROM tag pointing at args.header_loc.

    Delegates to BuildTagFromHdrAddr instead of duplicating the tag
    layout (bits[31:8] of the header address followed by a CRC8 byte).
    """
    return BuildTagFromHdrAddr(args.header_loc)
def BuildTagFromHdrAddr(header_loc):
    """Build a 4-byte tag: bits[31:8] of header_loc, then a CRC8 byte."""
    tag = bytearray(((header_loc >> shift) & 0xff) for shift in (8, 16, 24))
    tag.append(Crc8(0, tag))
    return tag
#
# Creates temporary file for read/write
# Reads binary file containing LFW image_size (loader_file)
# Writes LFW image to temporary file
# Reads RO image at beginning of rorw_file up to image_size
# (assumes RO/RW images have been padded with 0xFF
# Returns temporary file name
#
def PacklfwRoImage(rorw_file, loader_file, image_size):
    """Concatenate the whole LFW loader image and the first image_size
    bytes of the RO image into a new temporary file.

    Returns the temporary file's name; the caller is responsible for
    deleting it.
    """
    combined = tempfile.NamedTemporaryFile(delete=False)
    with open(loader_file, 'rb') as loader:
        combined.write(loader.read())
    with open(rorw_file, 'rb') as rorw:
        combined.write(rorw.read(image_size))
    combined.close()
    return combined.name
#
# Generate a test EC_RW image of same size
# as original.
# Preserve image_data structure and fill all
# other bytes with 0xA5.
# useful for testing SPI read and EC build
# process hash generation.
#
def gen_test_ecrw(pldrw):
    """Overwrite an EC_RW image with a fixed 0xA5 test pattern.

    Preserves the region between the two cookie markers (the image_data
    structure) so build bookkeeping still works; all bytes before
    cookie1 and after cookie2 become 0xA5.  Also dumps the first `size`
    bytes to 'ec_RW_test.bin' for inspection.

    Args:
        pldrw: mutable bytearray holding the EC_RW payload; modified
            in place.
    """
    debug_print("gen_test_ecrw: pldrw type =", type(pldrw))
    debug_print("len pldrw =", len(pldrw), " = ", hex(len(pldrw)))
    # Locate the cookies bracketing the preserved image_data region.
    cookie1_pos = pldrw.find(b'\x99\x88\x77\xce')
    cookie2_pos = pldrw.find(b'\xdd\xbb\xaa\xce', cookie1_pos+4)
    # NOTE(review): if cookie1 is absent, find() returns -1 and the
    # unpack below reads from the wrong slice -- confirm inputs always
    # contain both cookies.
    t = struct.unpack("<L", pldrw[cookie1_pos+0x24:cookie1_pos+0x28])
    size = t[0]
    debug_print("EC_RW size =", size, " = ", hex(size))
    debug_print("Found cookie1 at ", hex(cookie1_pos))
    debug_print("Found cookie2 at ", hex(cookie2_pos))
    if cookie1_pos > 0 and cookie2_pos > cookie1_pos:
        for i in range(0, cookie1_pos):
            pldrw[i] = 0xA5
        for i in range(cookie2_pos+4, len(pldrw)):
            pldrw[i] = 0xA5
    with open("ec_RW_test.bin", "wb") as fecrw:
        fecrw.write(pldrw[:size])
def parseargs():
    """Parse and return this script's command-line arguments.

    Fixes over the previous version: the unused local `rpath` is gone,
    and the --spi_clock / --spi_read_cmd help texts now list the values
    actually accepted by SPI_CLOCK_LIST and SPI_READ_CMD_LIST.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input",
                        help="EC binary to pack, usually ec.bin or ec.RO.flat.",
                        metavar="EC_BIN", default="ec.bin")
    parser.add_argument("-o", "--output",
                        help="Output flash binary file",
                        metavar="EC_SPI_FLASH", default="ec.packed.bin")
    parser.add_argument("--loader_file",
                        help="EC loader binary",
                        default="ecloader.bin")
    parser.add_argument("-s", "--spi_size", type=int,
                        help="Size of the SPI flash in KB",
                        default=512)
    parser.add_argument("-l", "--header_loc", type=int,
                        help="Location of header in SPI flash",
                        default=0x1000)
    parser.add_argument("-p", "--payload_offset", type=int,
                        help="The offset of payload from the start of header",
                        default=0x80)
    parser.add_argument("-r", "--rw_loc", type=int,
                        help="Start offset of EC_RW. Default is -1 meaning 1/2 flash size",
                        default=-1)
    parser.add_argument("--spi_clock", type=int,
                        help="SPI clock speed. 12, 16, 24, or 48 MHz.",
                        default=24)
    parser.add_argument("--spi_read_cmd", type=int,
                        help="SPI read command. 0x3, 0xB, 0x3B, or 0x6B.",
                        default=0xb)
    parser.add_argument("--image_size", type=int,
                        help="Size of a single image. Default 220KB",
                        default=(220 * 1024))
    parser.add_argument("--test_spi", action='store_true',
                        help="Test SPI data integrity by adding CRC32 in last 4-bytes of RO/RW binaries",
                        default=False)
    parser.add_argument("--test_ecrw", action='store_true',
                        help="Use fixed pattern for EC_RW but preserve image_data",
                        default=False)
    parser.add_argument("--verbose", action='store_true',
                        help="Enable verbose output",
                        default=False)
    return parser.parse_args()
# Debug helper routine
def dumpsects(spi_list):
    """Debug-dump every (offset, data, name) entry of an SPI section list."""
    debug_print("spi_list has {0} entries".format(len(spi_list)))
    for section_offset, section_data, section_name in spi_list:
        debug_print("0x{0:x} 0x{1:x} {2:s}".format(
            section_offset, len(section_data), section_name))
def printByteArrayAsHex(ba, title):
    """Debug-dump a byte array as hex, eight bytes per line."""
    debug_print(title, "= ")
    for position, byte in enumerate(ba, 1):
        debug_print("0x{0:02x}, ".format(byte), end="")
        if position % 8 == 0:
            debug_print("")
    debug_print("\n")
def print_args(args):
    """Debug-dump the parsed command-line arguments."""
    debug_print("parsed arguments:")
    debug_print(".input = ", args.input)
    debug_print(".output = ", args.output)
    debug_print(".loader_file = ", args.loader_file)
    debug_print(".spi_size (KB) = ", hex(args.spi_size))
    debug_print(".image_size = ", hex(args.image_size))
    debug_print(".header_loc = ", hex(args.header_loc))
    debug_print(".payload_offset = ", hex(args.payload_offset))
    # A negative rw_loc is the "use default" sentinel; print it as-is.
    rw_loc_repr = args.rw_loc if args.rw_loc < 0 else hex(args.rw_loc)
    debug_print(".rw_loc = ", rw_loc_repr)
    debug_print(".spi_clock = ", args.spi_clock)
    debug_print(".spi_read_cmd = ", args.spi_read_cmd)
    debug_print(".test_spi = ", args.test_spi)
    debug_print(".verbose = ", args.verbose)
#
# Handle quiet mode build from Makefile
# Quiet mode when V is unset or V=0
# Verbose mode when V=1
#
def main():
    """Pack EC firmware into an SPI flash image for MEC17xx.

    Reads the EC binary and LFW loader, builds the Boot-ROM tag and
    header, signs (SHA-256 + 32 zero bytes) the header and payloads,
    and writes the assembled image to args.output.
    """
    global debug_print
    args = parseargs()
    if args.verbose:
        debug_print = print
    debug_print("Begin MEC17xx pack_ec.py script")
    # MEC17xx maximum 192KB each for RO & RW
    # mec1701 chip Makefile sets args.spi_size = 512
    # Tags at offset 0
    #
    print_args(args)
    spi_size = args.spi_size * 1024
    debug_print("SPI Flash image size in bytes =", hex(spi_size))
    # !!! IMPORTANT !!!
    # These values MUST match chip/mec1701/config_flash_layout.h
    # defines.
    # MEC17xx Boot-ROM TAGs are at offset 0 and 4.
    # lfw + EC_RO starts at beginning of second 4KB sector
    # EC_RW starts at offset 0x40000 (256KB)
    spi_list = []
    debug_print("args.input = ", args.input)
    debug_print("args.loader_file = ", args.loader_file)
    debug_print("args.image_size = ", hex(args.image_size))
    # Pack LFW + EC_RO into one temporary image (removed below).
    rorofile = PacklfwRoImage(args.input, args.loader_file, args.image_size)
    payload = GetPayload(rorofile)
    payload_len = len(payload)
    # debug
    debug_print("EC_LFW + EC_RO length = ", hex(payload_len))
    # SPI image integrity test
    # compute CRC32 of EC_RO except for last 4 bytes
    # skip over 4KB LFW
    # Store CRC32 in last 4 bytes
    if args.test_spi == True:
        crc = zlib.crc32(bytes(payload[LFW_SIZE:(payload_len - 4)]))
        crc_ofs = payload_len - 4
        debug_print("EC_RO CRC32 = 0x{0:08x} @ 0x{1:08x}".format(crc, crc_ofs))
        for i in range(4):
            payload[crc_ofs + i] = crc & 0xff
            crc = crc >> 8
    # Chromebooks are not using MEC BootROM ECDSA.
    # We implemented the ECDSA disabled case where
    # the 64-byte signature contains a SHA-256 of the binary plus
    # 32 zeros bytes.
    payload_signature = SignByteArray(payload)
    # debug
    printByteArrayAsHex(payload_signature, "LFW + EC_RO payload_signature")
    # MEC17xx Header is 0x80 bytes with an 64 byte signature
    # (32 byte SHA256 + 32 zero bytes)
    header = BuildHeader(args, payload_len, LOAD_ADDR, rorofile)
    # debug
    printByteArrayAsHex(header, "Header LFW + EC_RO")
    # MEC17xx payload ECDSA not used, 64 byte signature is
    # SHA256 + 32 zero bytes
    header_signature = SignByteArray(header)
    # debug
    printByteArrayAsHex(header_signature, "header_signature")
    tag = BuildTag(args)
    # MEC17xx truncate RW length to args.image_size to not overwrite LFW
    # offset may be different due to Header size and other changes
    # MCHP we want to append a SHA-256 to the end of the actual payload
    # to test SPI read routines.
    debug_print("Call to GetPayloadFromOffset")
    debug_print("args.input = ", args.input)
    debug_print("args.image_size = ", hex(args.image_size))
    payload_rw = GetPayloadFromOffset(args.input, args.image_size)
    debug_print("type(payload_rw) is ", type(payload_rw))
    debug_print("len(payload_rw) is ", hex(len(payload_rw)))
    # truncate to args.image_size
    rw_len = args.image_size
    payload_rw = payload_rw[:rw_len]
    payload_rw_len = len(payload_rw)
    debug_print("Truncated size of EC_RW = ", hex(payload_rw_len))
    # Entry point lives at offset 4 of the RW image.
    payload_entry_tuple = struct.unpack_from('<I', payload_rw, 4)
    debug_print("payload_entry_tuple = ", payload_entry_tuple)
    payload_entry = payload_entry_tuple[0]
    debug_print("payload_entry = ", hex(payload_entry))
    # Note: payload_rw is a bytearray therefore is mutable
    if args.test_ecrw:
        gen_test_ecrw(payload_rw)
    # SPI image integrity test
    # compute CRC32 of EC_RW except for last 4 bytes
    # Store CRC32 in last 4 bytes
    # NOTE(review): the slice below stops 32 bytes short, not 4 as the
    # comment above says -- confirm whether a trailing hash area is
    # deliberately excluded here.
    if args.test_spi == True:
        crc = zlib.crc32(bytes(payload_rw[:(payload_rw_len - 32)]))
        crc_ofs = payload_rw_len - 4
        debug_print("EC_RW CRC32 = 0x{0:08x} at offset 0x{1:08x}".format(crc, crc_ofs))
        for i in range(4):
            payload_rw[crc_ofs + i] = crc & 0xff
            crc = crc >> 8
    payload_rw_sig = SignByteArray(payload_rw)
    # debug
    printByteArrayAsHex(payload_rw_sig, "payload_rw_sig")
    os.remove(rorofile)  # clean up the temp file
    # MEC170x Boot-ROM Tags are located at SPI offset 0
    spi_list.append((0, tag, "tag"))
    spi_list.append((args.header_loc, header, "header(lwf + ro)"))
    spi_list.append((args.header_loc + HEADER_SIZE, header_signature,
                     "header(lwf + ro) signature"))
    spi_list.append((args.header_loc + args.payload_offset, payload,
                     "payload(lfw + ro)"))
    offset = args.header_loc + args.payload_offset + payload_len
    # No SPI Header for EC_RW as its not loaded by BootROM
    spi_list.append((offset, payload_signature,
                     "payload(lfw_ro) signature"))
    # EC_RW location
    rw_offset = int(spi_size // 2)
    if args.rw_loc >= 0:
        rw_offset = args.rw_loc
    debug_print("rw_offset = 0x{0:08x}".format(rw_offset))
    # NOTE(review): overlap is only reported; packing continues --
    # confirm whether this should abort instead.
    if rw_offset < offset + len(payload_signature):
        print("ERROR: EC_RW overlaps EC_RO")
    spi_list.append((rw_offset, payload_rw, "payload(rw)"))
    # don't add to EC_RW. We don't know if Google will process
    # EC SPI flash binary with other tools during build of
    # coreboot and OS.
    #offset = rw_offset + payload_rw_len
    #spi_list.append((offset, payload_rw_sig, "payload(rw) signature"))
    spi_list = sorted(spi_list)
    dumpsects(spi_list)
    #
    # MEC17xx Boot-ROM locates TAG at SPI offset 0 instead of end of SPI.
    #
    # Write each section at its offset, filling gaps (and the remainder
    # of the flash) with 0xff, the erased-flash value.
    with open(args.output, 'wb') as f:
        debug_print("Write spi list to file", args.output)
        addr = 0
        for s in spi_list:
            if addr < s[0]:
                debug_print("Offset ", hex(addr), " Length", hex(s[0] - addr),
                            "fill with 0xff")
                f.write(b'\xff' * (s[0] - addr))
                addr = s[0]
            debug_print("Offset ", hex(addr), " Length", hex(len(s[1])), "write data")
            f.write(s[1])
            addr += len(s[1])
        if addr < spi_size:
            debug_print("Offset ", hex(addr), " Length", hex(spi_size - addr),
                        "fill with 0xff")
            f.write(b'\xff' * (spi_size - addr))
        f.flush()

if __name__ == '__main__':
    main()
| chip/mchp/util/pack_ec.py | 17,094 | Update CRC8 value.
Read entry point from payload EC image.
Read payload and pad it to 64-byte aligned.
Read payload and pad it to 64-byte aligned.
Extract public exponent and modulus from PEM file.
Create a temp file with the
first image_size bytes from the loader file and append bytes
from the rorw file.
return the filename
!/usr/bin/env python3 Copyright 2013 The Chromium OS Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Ignore indention messages, since legacy scripts use 2 spaces instead of 4. pylint: disable=bad-indentation,docstring-section-indent pylint: disable=docstring-trailing-quotes A script to pack EC binary into SPI flash image for MEC17xx Based on MEC170x_ROM_Description.pdf DS00002225C (07-28-17). CRC32 MEC1701 has 256KB SRAM from 0xE0000 - 0x120000 SRAM is divided into contiguous CODE & DATA CODE at [0xE0000, 0x117FFF] DATA at [0x118000, 0x11FFFF] SPI flash size for board is 512KB Boot-ROM TAG is located at SPI offset 0 (two 4-byte tags) Identifier and header version byte[5] byte[6] byte[7] bytes 0x08 - 0x0b bytes 0x0c - 0x0f bytes 0x10 - 0x13 bytes 0x14 - 0x17 bytes 0x14 - 0x3F all 0 header signature is appended by the caller Identifier and header version byte[5] byte[6] byte[7] bytes 0x08 - 0x0b bytes 0x0c - 0x0f bytes 0x10 - 0x13 bytes 0x14 - 0x17 bytes 0x14 - 0x3F all 0 header signature is appended by the caller Compute SHA-256 of data and return digest as a bytearray Return 64-byte signature of byte array data. Signature is SHA256 of data with 32 0 bytes appended MEC1701H supports two 32-bit Tags located at offsets 0x0 and 0x4 in the SPI flash. Tag format: bits[23:0] correspond to bits[31:8] of the Header SPI address Header is always on a 256-byte boundary. bits[31:24] = CRC8-ITU of bits[23:0]. Notice there is no chip-select field in the Tag both Tag's point to the same flash part. Creates temporary file for read/write Reads binary file containing LFW image_size (loader_file) Writes LFW image to temporary file Reads RO image at beginning of rorw_file up to image_size (assumes RO/RW images have been padded with 0xFF Returns temporary file name Need to keep file around read 4KB loader file write 4KB loader data to temp file Generate a test EC_RW image of same size as original. 
Preserve image_data structure and fill all other bytes with 0xA5. useful for testing SPI read and EC build process hash generation. Debug helper routine Handle quiet mode build from Makefile Quiet mode when V is unset or V=0 Verbose mode when V=1 MEC17xx maximum 192KB each for RO & RW mec1701 chip Makefile sets args.spi_size = 512 Tags at offset 0 !!! IMPORTANT !!! These values MUST match chip/mec1701/config_flash_layout.h defines. MEC17xx Boot-ROM TAGs are at offset 0 and 4. lfw + EC_RO starts at beginning of second 4KB sector EC_RW starts at offset 0x40000 (256KB) debug SPI image integrity test compute CRC32 of EC_RO except for last 4 bytes skip over 4KB LFW Store CRC32 in last 4 bytes Chromebooks are not using MEC BootROM ECDSA. We implemented the ECDSA disabled case where the 64-byte signature contains a SHA-256 of the binary plus 32 zeros bytes. debug MEC17xx Header is 0x80 bytes with an 64 byte signature (32 byte SHA256 + 32 zero bytes) debug MEC17xx payload ECDSA not used, 64 byte signature is SHA256 + 32 zero bytes debug MEC17xx truncate RW length to args.image_size to not overwrite LFW offset may be different due to Header size and other changes MCHP we want to append a SHA-256 to the end of the actual payload to test SPI read routines. truncate to args.image_size Note: payload_rw is a bytearray therefore is mutable SPI image integrity test compute CRC32 of EC_RW except for last 4 bytes Store CRC32 in last 4 bytes debug clean up the temp file MEC170x Boot-ROM Tags are located at SPI offset 0 No SPI Header for EC_RW as its not loaded by BootROM EC_RW location don't add to EC_RW. We don't know if Google will process EC SPI flash binary with other tools during build of coreboot and OS.offset = rw_offset + payload_rw_lenspi_list.append((offset, payload_rw_sig, "payload(rw) signature")) MEC17xx Boot-ROM locates TAG at SPI offset 0 instead of end of SPI. | 4,217 | en | 0.76291 |
#!/usr/bin/env python
"""Configuration parameters for the client."""
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.rdfvalues import crypto
# General Client options.
config_lib.DEFINE_string("Client.name", "GRR",
"The name of the client. This will be used as a base "
"name to generate many other default parameters such "
"as binary names and service names. Note that on "
"Linux we lowercase the name to confirm with most "
"linux naming conventions.")
config_lib.DEFINE_string("Client.binary_name", "%(Client.name)",
"The name of the client binary.")
config_lib.DEFINE_list("Client.labels", [], "Labels for this client.")
config_lib.DEFINE_string("Client.company_name", "GRR Project",
"The name of the company which made the client.")
config_lib.DEFINE_string("Client.description", "%(name) %(platform) %(arch)",
"A description of this specific client build.")
config_lib.DEFINE_string("Client.platform", "windows",
"The platform we are running on.")
config_lib.DEFINE_string("Client.arch", "amd64",
"The architecture we are running on.")
config_lib.DEFINE_string("Client.build_time", "Unknown",
"The time the client was built.")
config_lib.DEFINE_string("Client.deploy_time", "Unknown",
"The time the client was deployed.")
config_lib.DEFINE_string("Client.build_environment", None,
"The output of Uname.FromCurrentSystem.signature() "
"on the system the client was built on.")
config_lib.DEFINE_integer("Client.rsa_key_length", 2048,
"The key length of the client keys in bits.")
config_lib.DEFINE_string(
name="Client.install_path",
default=r"%(SystemRoot|env)\\System32\\%(name)\\%(Source.version_string)",
help="Where the client binaries are installed.")
config_lib.DEFINE_string(
name="Client.component_path",
default=r"%(Client.install_path)/components",
help="Where the client components are installed on the client.")
config_lib.DEFINE_string(
name="Client.component_url_stem",
default="%(Frontend.static_url_path_prefix)components/",
help="A URL path where components will be served from.")
config_lib.DEFINE_semantic(
rdfvalue.RDFURN,
"Client.component_aff4_stem",
default="%(Frontend.static_aff4_prefix)/components/",
description="A common AFF4 stem where components will be served from.")
config_lib.DEFINE_string(
name="Client.rekall_profile_cache_path",
default=r"%(Client.install_path)\\rekall_profiles",
help="Where GRR stores cached Rekall profiles needed for memory analysis")
config_lib.DEFINE_list(name="Client.server_urls",
default=[],
help="Base URL for client control.")
# Deprecated. Remove when all installations switch to Client.server_urls.
config_lib.DEFINE_list("Client.control_urls", ["http://localhost:8080/control"],
"List of URLs of the controlling server.")
config_lib.DEFINE_integer("Client.http_timeout", 100,
"Timeout for HTTP requests.")
config_lib.DEFINE_string("Client.plist_path",
"/Library/LaunchDaemons/com.google.code.grrd.plist",
"Location of our launchctl plist.")
config_lib.DEFINE_string("Client.plist_filename", None,
"Filename of launchctl plist.")
config_lib.DEFINE_string("Client.plist_label", None,
"Identifier label for launchd")
config_lib.DEFINE_string("Client.plist_label_prefix", None,
"Domain for launchd label.")
config_lib.DEFINE_float("Client.poll_min", 0.2,
"Minimum time between polls in seconds.")
config_lib.DEFINE_float("Client.poll_max", 5,
"Maximum time between polls in seconds.")
config_lib.DEFINE_float("Client.error_poll_min", 15,
"Minimum time between polls in seconds if the server "
"reported an error.")
config_lib.DEFINE_float("Client.poll_slew", 1.15, "Slew of poll time.")
config_lib.DEFINE_integer("Client.connection_error_limit", 60 * 24,
"If the client encounters this many connection "
"errors, it exits and restarts. Retries are one "
"minute apart.")
config_lib.DEFINE_integer("Client.retry_error_limit", 10,
"If the client encounters this many connection "
"errors, it searches for a new proxy/server url "
"combination.")
config_lib.DEFINE_list(
name="Client.proxy_servers",
help="List of valid proxy servers the client should try.",
default=[])
config_lib.DEFINE_integer("Client.max_post_size", 40000000,
"Maximum size of the post.")
config_lib.DEFINE_integer("Client.max_out_queue", 51200000,
"Maximum size of the output queue.")
config_lib.DEFINE_integer("Client.foreman_check_frequency", 1800,
"The minimum number of seconds before checking with "
"the foreman for new work.")
config_lib.DEFINE_float("Client.rss_max", 1000,
"Maximum memory footprint in MB (soft limit). "
"Exceeding this will result in an orderly shutdown.")
config_lib.DEFINE_float("Client.rss_max_hard", 2000,
"Maximum memory footprint in MB (hard limit). "
"Exceeding this will result in aborting the current "
"client action and restarting.")
config_lib.DEFINE_string(
name="Client.tempfile_prefix",
help="Prefix to use for temp files created by the GRR client.",
default="tmp%(Client.name)")
config_lib.DEFINE_list(
name="Client.tempdir_roots",
help="List of temporary directories to use on the client.",
default=["/var/tmp/"])
config_lib.DEFINE_string(
name="Client.grr_tempdir",
help="Default subdirectory in the temp directory to use for GRR.",
default="%(Client.name)")
config_lib.DEFINE_list(
name="Client.vfs_virtualroots",
help=("If this is set for a VFS type, client VFS operations will always be"
" relative to the given root. Format is os:/mount/disk."),
default=[])
# Windows client specific options.
config_lib.DEFINE_string("Client.config_hive",
r"HKEY_LOCAL_MACHINE",
help="The registry hive where the client "
"configuration will be stored.")
config_lib.DEFINE_string("Client.config_key",
r"Software\\GRR",
help="The registry key where client configuration "
"will be stored.")
# Client Cryptographic options. Here we define defaults for key values.

# No default: generated during enrollment if absent.
config_lib.DEFINE_semantic(
    crypto.RSAPrivateKey,
    "Client.private_key",
    description="Client private key in pem format. If not provided this "
    "will be generated by the enrollment process.",)

config_lib.DEFINE_semantic(
    crypto.RDFX509Cert,
    "CA.certificate",
    description="Trusted CA certificate in X509 pem format",)

# Public halves ship with the client; the matching private keys below are
# expected to be kept offline (see their descriptions).
config_lib.DEFINE_semantic(
    crypto.RSAPublicKey,
    "Client.executable_signing_public_key",
    description="public key for verifying executable signing.")

config_lib.DEFINE_semantic(
    crypto.RSAPrivateKey,
    "PrivateKeys.executable_signing_private_key",
    description="Private keys for signing executables. NOTE: This "
    "key is usually kept offline and is thus not present in the "
    "configuration file.")

config_lib.DEFINE_semantic(
    crypto.RSAPublicKey,
    "Client.driver_signing_public_key",
    description="public key for verifying driver signing.")

config_lib.DEFINE_semantic(
    crypto.RSAPrivateKey,
    "PrivateKeys.driver_signing_private_key",
    description="Private keys for signing drivers. NOTE: This "
    "key is usually kept offline and is thus not present in the "
    "configuration file.")

# Server certificates with a serial below this value are rejected.
config_lib.DEFINE_integer("Client.server_serial_number", 0,
                          "Minimal serial number we accept for server cert.")

config_lib.DEFINE_integer(
    "Client.gc_frequency", 10,
    "Defines how often the client calls garbage collection (seconds).")
# The following configuration options are defined here but are used in
# the windows nanny code (grr/client/nanny/windows_nanny.h).
config_lib.DEFINE_string("Nanny.child_binary",
                         "GRR.exe",
                         help="The location to the client binary.")

config_lib.DEFINE_string("Nanny.child_command_line",
                         "%(Nanny.child_binary)",
                         help="The command line to launch the client binary.")

config_lib.DEFINE_string("Nanny.logfile", "%(Logging.path)/nanny.log",
                         "The file where we write the nanny transaction log.")

config_lib.DEFINE_string("Nanny.service_name",
                         "GRR Service",
                         help="The name of the nanny.")

config_lib.DEFINE_string("Nanny.service_description",
                         "GRR Service",
                         help="The description of the nanny service.")

config_lib.DEFINE_string("Nanny.service_key",
                         r"%(Client.config_key)",
                         help="The registry key of the nanny service.")

# BUGFIX: the help text was a copy-paste of Nanny.service_key's ("registry
# key"); this option names the registry *hive* (cf. Client.config_hive).
config_lib.DEFINE_string("Nanny.service_key_hive",
                         r"%(Client.config_hive)",
                         help="The registry hive of the nanny service.")

config_lib.DEFINE_string("Nanny.statusfile", "%(Logging.path)/nanny.status",
                         "The file where we write the nanny status.")

config_lib.DEFINE_string("Nanny.status", "",
                         "The regkey where we write the nanny status.")

# "%(service_binary_name)" is resolved relative to the Nanny section.
config_lib.DEFINE_string("Nanny.binary",
                         r"%(Client.install_path)\\%(service_binary_name)",
                         help="The full location to the nanny binary.")

config_lib.DEFINE_string("Nanny.service_binary_name",
                         "%(Client.name)service.exe",
                         help="The executable name of the nanny binary.")

# Watchdog timeout: the nanny terminates the client after this many seconds
# of unresponsiveness.
config_lib.DEFINE_integer("Nanny.unresponsive_kill_period", 60,
                          "The time in seconds after which the nanny kills us.")

config_lib.DEFINE_integer("Network.api", 3,
                          "The version of the network protocol the client "
                          "uses.")

config_lib.DEFINE_string("Network.compression",
                         default="ZCOMPRESS",
                         help="Type of compression (ZCOMPRESS, UNCOMPRESSED)")
# Installer options.
config_lib.DEFINE_string(
    name="Installer.logfile",
    default="%(Logging.path)/%(Client.name)_installer.txt",
    help=("A specific log file which is used for logging the "
          "installation process."))

# Each entry maps "<hive>\<key>\<value>" to a new config parameter; the
# triple-quoted help text below documents the "old -> new" format.
config_lib.DEFINE_list("Installer.old_key_map", [
    "HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate->Client.private_key",
    "HKEY_LOCAL_MACHINE\\Software\\GRR\\server_serial_number"
    "->Client.server_serial_number",
], """
A mapping of old registry values which will be copied to new values. The old
value location must start with a valid hive name, followed by a key name, and
end with the value name. The source location must be separated from the new
parameter name by a -> symbol.
This setting allows to carry over settings from obsolete client installations to
newer versions of the client which may store the same information in other
locations.
For example:
HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate -> Client.private_key
""")
| grr/config/client.py | 11,990 | Configuration parameters for the client.
!/usr/bin/env python General Client options. Deprecated. Remove when all installations switch to Client.server_urls. Windows client specific options. Client Cryptographic options. Here we define defaults for key values. The following configuration options are defined here but are used in the windows nanny code (grr/client/nanny/windows_nanny.h). Installer options. | 408 | en | 0.584208 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Store the transport used for all API calls.

    :param api_client: optional ApiClient; a default-constructed client
        is created when none is supplied.
    """
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
    """
    create a Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_job(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the transport for just the deserialized body instead of the
    # (data, status_code, headers) tuple that _with_http_info can return.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async: hand the caller the request thread directly.
        return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
    else:
        (data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
        return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
    """
    create a Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint, plus the internal
    # transport-control flags shared by every generated method.
    all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() here captures the named parameters plus 'kwargs'; the loop
    # validates each kwarg name and flattens it into the same dict, so
    # 'params' ends up holding every argument by name.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_namespaced_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")

    collection_formats = {}

    # Map snake_case python arguments onto the camelCase REST names.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # The V1Job manifest is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken']

    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs):
    """
    delete collection of Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper: request only the deserialized V1Status, delegating the
    # actual HTTP work to the _with_http_info variant.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
    else:
        (data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
        return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
    """
    delete collection of Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted endpoint kwargs (note '_continue': 'continue' is a Python
    # keyword, hence the leading underscore) plus transport-control flags.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures 'namespace' and 'kwargs'; validated kwargs are
    # flattened into the same dict so everything is addressable by name.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")

    collection_formats = {}

    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Translate snake_case argument names to the camelCase query params the
    # API server expects ('_continue' maps back to 'continue').
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # Collection delete carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken']

    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
    """
    delete a Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param V1DeleteOptions body:
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper: request only the deserialized V1Status, delegating the
    # actual HTTP work to the _with_http_info variant.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
        return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
    """
    delete a Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param V1DeleteOptions body:
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted endpoint kwargs plus the transport-control flags shared by
    # every generated method.
    all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures 'name', 'namespace' and 'kwargs'; validated kwargs
    # are flattened into the same dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")

    collection_formats = {}

    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']

    # Translate snake_case argument names to camelCase query parameters.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    if 'grace_period_seconds' in params:
        query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
    if 'orphan_dependents' in params:
        query_params.append(('orphanDependents', params['orphan_dependents']))
    if 'propagation_policy' in params:
        query_params.append(('propagationPolicy', params['propagation_policy']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional V1DeleteOptions goes in the request body when supplied.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken']

    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
    """
    get available resources
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Thin wrapper: request only the deserialized V1APIResourceList,
    # delegating the actual HTTP work to the _with_http_info variant.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_api_resources_with_http_info(**kwargs)
    else:
        (data) = self.get_api_resources_with_http_info(**kwargs)
        return data
def get_api_resources_with_http_info(self, **kwargs):
    """
    get available resources
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_resources_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no parameters of its own; only the shared
    # transport-control flags are accepted.
    all_params = []
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() captures just 'kwargs' here; validated kwargs are flattened
    # into the same dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    # Discovery call: no path, query, form or body parameters.
    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

    # Authentication setting
    auth_settings = ['BearerToken']

    return self.api_client.call_api('/apis/batch/v1/', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1APIResourceList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind Job

    Lists Jobs across every namespace. Synchronous by default; pass
    async_req=True to get back the request thread instead of the result.

    :param async_req bool
    :param str _continue: continue token from a previous paged list call
    :param str field_selector: restrict the returned objects by their fields
    :param bool include_uninitialized: include partially initialized resources
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses for this list call
    :param str pretty: if 'true', pretty-print the output
    :param str resource_version: resource version to start listing/watching from
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications
    :return: V1JobList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant; only the response payload
    # is wanted, not the full (data, status, headers) tuple. The sync and
    # async branches of the generated code issue identical calls, so one
    # delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.list_job_for_all_namespaces_with_http_info(**kwargs)
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
    """
    list or watch objects of kind Job

    Builds and issues GET /apis/batch/v1/jobs. This method makes a
    synchronous HTTP request by default. To make an asynchronous HTTP
    request, please pass async_req=True

    >>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str _continue: continue token from a previous paged list call
    :param str field_selector: restrict the returned objects by their fields
    :param bool include_uninitialized: include partially initialized resources
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses for this list call
    :param str pretty: if 'true', pretty-print the output
    :param str resource_version: resource version to start listing/watching from
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications
    :return: V1JobList
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    """
    all_params = ['_continue', 'field_selector', 'include_uninitialized',
                  'label_selector', 'limit', 'pretty', 'resource_version',
                  'timeout_seconds', 'watch']
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    # Collect options into an explicit dict instead of mutating the
    # mapping returned by locals() (whose mutation is undefined
    # behavior); kwargs.items() also drops the Python-2-only
    # six.iteritems dependency.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_job_for_all_namespaces" % key
            )
        params[key] = val

    collection_formats = {}
    path_params = {}

    # (python_name, wire_name) pairs, appended in the same order as the
    # generated client so the resulting query string is unchanged.
    query_params = []
    for py_name, wire_name in [('_continue', 'continue'),
                               ('field_selector', 'fieldSelector'),
                               ('include_uninitialized', 'includeUninitialized'),
                               ('label_selector', 'labelSelector'),
                               ('limit', 'limit'),
                               ('pretty', 'pretty'),
                               ('resource_version', 'resourceVersion'),
                               ('timeout_seconds', 'timeoutSeconds'),
                               ('watch', 'watch')]:
        if py_name in params:
            query_params.append((wire_name, params[py_name]))

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1JobList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
    """
    list or watch objects of kind Job

    Lists Jobs inside a single namespace. Synchronous by default; pass
    async_req=True to get back the request thread instead of the result.

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: include partially initialized resources
    :param str pretty: if 'true', pretty-print the output
    :param str _continue: continue token from a previous paged list call
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses for this list call
    :param str resource_version: resource version to start listing/watching from
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications
    :return: V1JobList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant; only the response payload
    # is wanted, not the full (data, status, headers) tuple. The sync and
    # async branches of the generated code issue identical calls, so one
    # delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_job_with_http_info(namespace, **kwargs)
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
    """
    list or watch objects of kind Job

    Builds and issues GET /apis/batch/v1/namespaces/{namespace}/jobs.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: include partially initialized resources
    :param str pretty: if 'true', pretty-print the output
    :param str _continue: continue token from a previous paged list call
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses for this list call
    :param str resource_version: resource version to start listing/watching from
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications
    :return: V1JobList
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if `namespace` is None
    """
    all_params = ['namespace', 'include_uninitialized', 'pretty',
                  '_continue', 'field_selector', 'label_selector', 'limit',
                  'resource_version', 'timeout_seconds', 'watch']
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    # Collect options into an explicit dict instead of mutating the
    # mapping returned by locals() (whose mutation is undefined
    # behavior); kwargs.items() also drops the Python-2-only
    # six.iteritems dependency.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_job" % key
            )
        params[key] = val

    # verify the required parameter 'namespace' is set (it is always
    # bound as a positional, so only the None check remains)
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")

    collection_formats = {}
    path_params = {'namespace': namespace}

    # (python_name, wire_name) pairs, appended in the same order as the
    # generated client so the resulting query string is unchanged.
    query_params = []
    for py_name, wire_name in [('include_uninitialized', 'includeUninitialized'),
                               ('pretty', 'pretty'),
                               ('_continue', 'continue'),
                               ('field_selector', 'fieldSelector'),
                               ('label_selector', 'labelSelector'),
                               ('limit', 'limit'),
                               ('resource_version', 'resourceVersion'),
                               ('timeout_seconds', 'timeoutSeconds'),
                               ('watch', 'watch')]:
        if py_name in params:
            query_params.append((wire_name, params[py_name]))

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1JobList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
    """
    partially update the specified Job

    Applies a patch to an existing Job. Synchronous by default; pass
    async_req=True to get back the request thread instead of the result.

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :return: V1Job
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant; only the response payload
    # is wanted, not the full (data, status, headers) tuple. The sync and
    # async branches of the generated code issue identical calls, so one
    # delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
    """
    partially update the specified Job

    Builds and issues PATCH /apis/batch/v1/namespaces/{namespace}/jobs/{name}.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :return: V1Job
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if a required parameter is None
    """
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    # Collect options into an explicit dict instead of mutating the
    # mapping returned by locals() (whose mutation is undefined
    # behavior); kwargs.items() also drops the Python-2-only
    # six.iteritems dependency.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_job" % key
            )
        params[key] = val

    # verify the required parameters are set (they are always bound as
    # positionals, so only the None checks remain)
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")

    collection_formats = {}
    path_params = {'name': name, 'namespace': namespace}

    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = body
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type` (PATCH accepts the three k8s patch formats)
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
    """
    partially update status of the specified Job

    Applies a patch to the status subresource of an existing Job.
    Synchronous by default; pass async_req=True to get back the request
    thread instead of the result.

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :return: V1Job
        If the method is called asynchronously,
        returns the request thread.
    """
    # Delegate to the *_with_http_info variant; only the response payload
    # is wanted, not the full (data, status, headers) tuple. The sync and
    # async branches of the generated code issue identical calls, so one
    # delegation covers both.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
    """
    partially update status of the specified Job

    Builds and issues PATCH /apis/batch/v1/namespaces/{namespace}/jobs/{name}/status.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'
    :return: V1Job
        If the method is called asynchronously,
        returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if a required parameter is None
    """
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    # Collect options into an explicit dict instead of mutating the
    # mapping returned by locals() (whose mutation is undefined
    # behavior); kwargs.items() also drops the Python-2-only
    # six.iteritems dependency.
    params = {}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_job_status" % key
            )
        params[key] = val

    # verify the required parameters are set (they are always bound as
    # positionals, so only the None checks remain)
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")

    collection_formats = {}
    path_params = {'name': name, 'namespace': namespace}

    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = body
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type` (PATCH accepts the three k8s patch formats)
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
    """
    read the specified Job

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead of
    the response payload.
    >>> thread = api.read_namespaced_job(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # This convenience wrapper always wants just the payload, never the
    # (data, status, headers) triple from the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both delegate to the same implementation; in
    # the async case the delegate already returns the request thread.
    return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
    """
    read the specified Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Every keyword this endpoint accepts, including the transport
    # options shared by all generated API methods.
    all_params = ['name', 'namespace', 'pretty', 'exact', 'export',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    # Build the params dict explicitly; this replaces the fragile
    # locals() + six.iteritems() pattern emitted by older generators and
    # drops the Python 2 compatibility dependency.
    params = {'name': name, 'namespace': namespace}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_job" % key
            )
        params[key] = val
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters, only sent when supplied.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'exact' in params:
        query_params.append(('exact', params['exact']))
    if 'export' in params:
        query_params.append(('export', params['export']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs):
    """
    read status of the specified Job

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead of
    the response payload.
    >>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper want only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Single delegation covers both sync and async: the delegate
    # returns the request thread when async_req is truthy.
    return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
    """
    read status of the specified Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Every keyword this endpoint accepts, including the transport
    # options shared by all generated API methods.
    all_params = ['name', 'namespace', 'pretty',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    # Build the params dict explicitly; this replaces the fragile
    # locals() + six.iteritems() pattern emitted by older generators and
    # drops the Python 2 compatibility dependency.
    params = {'name': name, 'namespace': namespace}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_job_status" % key
            )
        params[key] = val
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters, only sent when supplied.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
    """
    replace the specified Job

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead of
    the response payload.
    >>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper want only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Single delegation covers both sync and async: the delegate
    # returns the request thread when async_req is truthy.
    return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace the specified Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Every keyword this endpoint accepts, including the transport
    # options shared by all generated API methods.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    # Build the params dict explicitly; this replaces the fragile
    # locals() + six.iteritems() pattern emitted by older generators and
    # drops the Python 2 compatibility dependency.
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_job" % key
            )
        params[key] = val
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters, only sent when supplied.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The replacement Job object is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
    """
    replace status of the specified Job

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead of
    the response payload.
    >>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Callers of this wrapper want only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Single delegation covers both sync and async: the delegate
    # returns the request thread when async_req is truthy.
    return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace status of the specified Job
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str name: name of the Job (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Job body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Job
    If the method is called asynchronously,
    returns the request thread.
    """
    # Every keyword this endpoint accepts, including the transport
    # options shared by all generated API methods.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']
    # Build the params dict explicitly; this replaces the fragile
    # locals() + six.iteritems() pattern emitted by older generators and
    # drops the Python 2 compatibility dependency.
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_job_status" % key
            )
        params[key] = val
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
    collection_formats = {}
    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Optional query-string parameters, only sent when supplied.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The replacement status object is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Job',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| kubernetes/client/apis/batch_v1_api.py | 91,264 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 python 2 and python 3 compatibility library verify the required parameter 'namespace' is set verify the required parameter 'body' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'namespace' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting HTTP header `Accept` HTTP header `Content-Type` Authentication setting HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'namespace' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set verify the required parameter 'body' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set verify the required parameter 'body' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set verify the required parameter 'body' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting verify the required parameter 'name' is set verify the required parameter 'namespace' is set verify the required parameter 'body' is set HTTP header `Accept` HTTP header `Content-Type` Authentication setting | 44,272 | en | 0.840544 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-12-06 16:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration updating the ``choices`` on ``Student.track``."""

    # Must be applied after the previous roster migration.
    dependencies = [
        ('roster', '0021_auto_20180825_1843'),
    ]

    # AlterField rewrites the field definition/validation only; since
    # ``choices`` is enforced at the form/model layer, stored rows are
    # presumably unchanged by this migration.
    operations = [
        migrations.AlterField(
            model_name='student',
            name='track',
            field=models.CharField(choices=[('A', 'Weekly'), ('B', 'Biweekly'), ('C', 'Correspondence'), ('E', 'External'), ('N', 'Not applicable')], max_length=5),
        ),
    ]
| roster/migrations/0022_auto_20181206_1148.py | 564 | -*- coding: utf-8 -*- Generated by Django 1.11.9 on 2018-12-06 16:48 | 68 | en | 0.63487 |
# author : chenxi
# encoding:utf-8
import time
import run
if __name__ == "__main__":
    # Repeatedly start Score servers, backing off for a few seconds when
    # startup raises.  ``mutex`` is a cooperative flag (1 = free, 0 = busy);
    # it is not a real lock -- this loop is single-threaded.
    #
    # Fixes over the original: print() function form (works on Python 2 and
    # 3); the Score instance is appended only after its server started, so
    # failed attempts no longer leave stale placeholder entries in ``users``;
    # the ``else`` branch sleeps instead of busy-waiting.
    mutex = 1
    users = []  # Score instances whose servers came up successfully
    while True:
        if mutex == 1:
            mutex -= 1  # mark busy while a server is being started
            try:
                score = run.Score()
                score.run_server()
                users.append(score)
                mutex += 1  # release the flag on success
                print(mutex)
            except Exception:
                # Startup failed: wait before retrying, and release the
                # flag so the next iteration can attempt again.
                time.sleep(3)
                mutex += 1
                print(mutex)
        else:
            # Flag is held (unreachable while single-threaded); avoid a
            # tight busy-wait by pausing before re-checking.
            print(mutex)
            time.sleep(1)
# -*- coding: utf-8 -*-
#
# Submittable API Client documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 15:21:21 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Make the package importable by autodoc: resolve the repository root
# (three directory levels above this conf.py) and prepend its
# ``submittable_api_client`` package directory to sys.path.
dirname = os.path.dirname
sys.path.insert(
    0,
    os.path.join(dirname(dirname(dirname(os.path.abspath(__file__)))),
                 'submittable_api_client'),
)
# Build-time sanity check of the import path.  Parenthesized call form so
# the config works under both Python 2 and Python 3 (the original
# ``print sys.path`` statement is a SyntaxError on Python 3).
print(sys.path)
# -- General configuration ------------------------------------------------

# Sphinx extensions enabled for this build; autodoc pulls API docs
# straight from the package docstrings.
extensions = [
    'sphinx.ext.autodoc',
]

# Directories (relative to this file) searched for HTML templates.
templates_path = ['_templates']

# Source documents are reStructuredText.
source_suffix = '.rst'

# Document that anchors the master toctree.
master_doc = 'index'

# Project identity shown throughout the generated documentation.
project = u'Submittable API Client'
copyright = u'2014, Shawn Rider'

# Short X.Y version and the full release string (identical for now).
version = '0.1'
release = '0.1'

# Glob patterns (relative to the source directory) excluded when
# scanning for source files.
exclude_patterns = []

# Pygments style used to highlight code samples.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# Built-in theme used to render the HTML pages.
html_theme = 'default'
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SubmittableAPIClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SubmittableAPIClient.tex', u'Submittable API Client Documentation',
u'Shawn Rider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'submittableapiclient', u'Submittable API Client Documentation',
[u'Shawn Rider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SubmittableAPIClient', u'Submittable API Client Documentation',
u'Shawn Rider', 'SubmittableAPIClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| docs/source/conf.py | 8,406 | -*- coding: utf-8 -*- Submittable API Client documentation build configuration file, created by sphinx-quickstart on Mon Jun 9 15:21:21 2014. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here.needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix of source filenames. The encoding of source files.source_encoding = 'utf-8-sig' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages.language = None There are two options for replacing |today|: either, you set today to some non-false value, then it is used:today = '' Else, today_fmt is used as the format for a strftime call.today_fmt = '%B %d, %Y' List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. 
The reST default role (used for this markup: `text`) to use for all documents.default_role = None If true, '()' will be appended to :func: etc. cross-reference text.add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::).add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting.modindex_common_prefix = [] If true, keep warnings as "system message" paragraphs in the built documents.keep_warnings = False -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation.html_theme_options = {} Add any paths that contain custom themes here, relative to this directory.html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation".html_title = None A shorter title for the navigation bar. Default is the same as html_title.html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar.html_logo = None The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large.html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory. 
These files are copied directly to the root of the documentation.html_extra_path = [] If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.html_last_updated_fmt = '%b %d, %Y' If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities.html_use_smartypants = True Custom sidebar templates, maps document names to template names.html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names.html_additional_pages = {} If false, no module index is generated.html_domain_indices = True If false, no index is generated.html_use_index = True If true, the index is split into individual pages for each letter.html_split_index = False If true, links to the reST sources are added to the pages.html_show_sourcelink = True If true, "Created using Sphinx" is shown in the HTML footer. Default is True.html_show_sphinx = True If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.html_show_copyright = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served.html_use_opensearch = '' This is the file name suffix for HTML files (e.g. ".xhtml").html_file_suffix = None Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- The paper size ('letterpaper' or 'a4paper').'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt').'pointsize': '10pt', Additional stuff for the LaTeX preamble.'preamble': '', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). 
The name of an image file (relative to this directory) to place at the top of the title page.latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters.latex_use_parts = False If true, show page references after internal links.latex_show_pagerefs = False If true, show URL addresses after external links.latex_show_urls = False Documents to append as an appendix to all manuals.latex_appendices = [] If false, no module index is generated.latex_domain_indices = True -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). If true, show URL addresses after external links.man_show_urls = False -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) Documents to append as an appendix to all manuals.texinfo_appendices = [] If false, no module index is generated.texinfo_domain_indices = True How to display URL addresses: 'footnote', 'no', or 'inline'.texinfo_show_urls = 'footnote' If true, do not generate a @detailmenu in the "Top" node's menu.texinfo_no_detailmenu = False | 7,000 | en | 0.666306 |
"""Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .karb //as a reply to any text message
Thanks to @r4v4n4 for vars,,, Random RGB feature by @PhycoNinja13b"""
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
import random
from userbot.utils import admin_cmd
#@borg.on(events.NewMessage(pattern=r"\.karb ", outgoing=True))
@borg.on(admin_cmd(pattern="karb"))
async def carbon_api(e):
    """Render text as a carbon.now.sh code image and upload it to the chat.

    The snippet is taken from the text following ``.karb``, or — if none is
    given — from the replied-to message.  A random RGB background colour and
    a random theme are applied, the image is exported at 4x as PNG through a
    headless Chrome, then sent and the status message is deleted.
    """
    # RGB channels are 0-255. randint's bounds are inclusive, so the old
    # upper bound of 256 could produce an out-of-range channel value.
    RED = random.randint(0, 255)
    GREEN = random.randint(0, 255)
    BLUE = random.randint(0, 255)
    THEME = [
        "3024-night",
        "a11y-dark",
        "blackboard",
        "base16-dark",
        "base16-light",
        "cobalt",
        "dracula",
        "duotone-dark",
        "hopscotch",
        "lucario",
        "material",
        "monokai",
        "night-owl",
        "nord",
        "oceanic-next",
        "one-light",
        "one-dark",
        "panda-syntax",
        "paraiso-dark",
        "seti",
        "shades-of-purple",
        "solarized",
        "solarized%20light",
        "synthwave-84",
        "twilight",
        "verminal",
        "vscode",
        "yeti",
        "zenburn",
    ]
    The = random.choice(THEME)  # idiomatic replacement for randint() indexing
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        # A wrapper for carbon.now.sh: build the URL, drive a headless
        # Chrome to export the PNG, then upload the result.
        await e.edit("⬜⬜⬜⬜⬜")
        # FIX: the blue channel used to be interpolated as ".{B}" — a
        # fraction below 1 that effectively zeroed the blue component.
        CARBON = 'https://carbon.now.sh/?bg=rgba({R}%2C{G}%2C{B}%2C1)&t={T}&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}'
        CARBONLANG = "en"
        textx = await e.get_reply_message()
        pcode = e.text
        if pcode[6:]:
            pcode = str(pcode[6:])  # text supplied inline after ".karb "
        elif textx:
            pcode = str(textx.message)  # fall back to the replied-to message
        code = quote_plus(pcode)  # URL-encode the snippet
        url = CARBON.format(code=code, R=RED, G=GREEN, B=BLUE, T=The, lang=CARBONLANG)
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.binary_location = Config.GOOGLE_CHROME_BIN
        chrome_options.add_argument("--window-size=1920x1080")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument('--disable-gpu')
        prefs = {'download.default_directory': './'}
        chrome_options.add_experimental_option('prefs', prefs)
        await e.edit("⬛⬛⬜⬜⬜")
        driver = webdriver.Chrome(executable_path=Config.CHROME_DRIVER, options=chrome_options)
        driver.get(url)
        download_path = './'
        # Enable downloads in headless mode via the DevTools protocol.
        driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
        params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
        command_result = driver.execute("send_command", params)
        driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
        sleep(5)  # rendering might take a bit
        driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
        sleep(5)
        await e.edit("⬛⬛⬛⬜⬜")
        driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
        sleep(5)  # waiting for the download to finish
        # FIX: shut the browser down — the driver was never quit before,
        # leaking a headless Chrome process per invocation.
        driver.quit()
        await e.edit("⬛⬛⬛⬛⬛")
        file = './carbon.png'
        await e.edit("✅RGB Karbon Completed, Uploading RGB Karbon✅")
        await e.client.send_file(
            e.chat_id,
            file,
            caption="Carbonised by [TeleBot](https://t.me/TeleBotHelp)",
            force_document=False,
            reply_to=e.message.reply_to_msg_id,
        )
        os.remove('./carbon.png')  # remove carbon.png after uploading
        await e.delete()  # delete the progress/status message
| userbot/plugins/carbonRGB (2).py | 4,229 | Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .karb //as a reply to any text message
Thanks to @r4v4n4 for vars,,, Random RGB feature by @PhycoNinja13b
@borg.on(events.NewMessage(pattern=r"\.karb ", outgoing=True)) Importing message to module Converting to urlencoded this might take a bit.Waiting for downloading Removing carbon.png after uploading Deleting msg | 385 | en | 0.624235 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = 'Andreas Bader'
__version__ = "0.01"
# db_folders -> List of DB Folder (for space check)
# db_client -> name of ycsb client
# db_args -> special ycsb arguments for this db
# db_name -> name of this db (e.g. for workload file)
# db_desc -> more detailed name/description
# jvm_args -> special jvm_args for this db and ycsb
# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!)
# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))
# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))
# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# basic -> True/False, if True this is a basic database, so no need to ssh for space checking
# sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.)
# include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible!
# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:
# %%IP%% -> IP of (actual) db vm
# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)
# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)
# %%IPall%% -> give String with IP of all vms)
# %%HN%% -> Hostname of (actual) db vm
# %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs)
# %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%)
# %%HNall%% -> give String with Hostname of all vms)
# %%SSH%% -> if SSH should be used (set at the beginning)
# Order of Preruns/Postruns:
# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. preun_skaves/postrun_slaves/check_slaves, 4.prerun_dict/postrun_dict/check_dict
# General Order:
# prerun -> check -> ycsb -> postrun
def getDict():
    """Return the benchmark configuration for a 5-VM Druid cluster (RF 1).

    Keys follow the glossary documented in this file's header.  VM roles,
    per ``sequence`` and the systemd units started in ``prerun_dict``:
    0=Coordinator, 1=Broker, 2=Historical, 3=MiddleManager, 4=Overlord.
    """
    dbConfig={}
    # Data directories whose disk usage is measured around the run.
    dbConfig["db_folders"]=["/tmp/druid/indexCache", "/tmp/persistent/zk_druid", "/tmp/persistent/task/", "/tmp/druid/localStorage", "/var/lib/mysql"]
    dbConfig["db_client"]="druid"
    dbConfig["db_args"]="-p zookeeperip=%%IP0%% -p queryip=%%IP1%% -p zookeeperport=2181 -p queryport=8090 -p replicants=1"
    dbConfig["db_name"]="druid_cl5_rf1"
    dbConfig["db_desc"]="Druid (Broker,Coordinator,Historical,MiddleManager,Overlord) on 5 VMs with Replication Factor 1. Ingest via Tranquility/Finagle, Query via REST."
    dbConfig["jvm_args"]="-jvm-args='-Xmx4096m'"
    dbConfig["prerun_once"]= []
    dbConfig["postrun_once"]= []
    # Point every node's Druid runtime.properties at the VM hosting that service.
    dbConfig["prerun"]= ["%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP0%%|g\" /home/vagrant/config/_common/common.runtime.properties'",
                         "%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP1%%|g\" /home/vagrant/config/broker/runtime.properties'",
                         "%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP0%%|g\" /home/vagrant/config/coordinator/runtime.properties'",
                         "%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP2%%|g\" /home/vagrant/config/historical/runtime.properties'",
                         "%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP3%%|g\" /home/vagrant/config/middleManager/runtime.properties'",
                         "%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP4%%|g\" /home/vagrant/config/overlord/runtime.properties'"]
    dbConfig["postrun"]= []
    dbConfig["prerun_master"]= []
    dbConfig["postrun_master"]= []
    dbConfig["prerun_slaves"]= []
    dbConfig["postrun_slaves"]= []
    # Start one Druid service per VM; the overlord (started last) gets a
    # 180s grace period before the benchmark continues.
    dbConfig["prerun_dict"]= {
        0 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_coordinator.service'"],
        1 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_broker.service'"],
        2 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_historical.service'"],
        3 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_middlemanager.service'"],
        4 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_overlord.service'",
             "bash -c 'sleep 180'"]
        }
    dbConfig["postrun_dict"]= {}
    dbConfig["check"]= []
    dbConfig["check_master"]= []
    dbConfig["check_slaves"]= []
    # Per-VM health checks: the repo unit must not be dead, and the node's
    # service must report "active (running)" (not merely "active (exited)").
    dbConfig["check_dict"]= {
        0 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
             "%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_coordinator.service | grep -c \"active (exited)\")'",
             "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_coordinator.service | grep -c \"active (running)\")-1))'"],
        1 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
             "%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_broker.service | grep -c \"active (exited)\")'",
             "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_broker.service | grep -c \"active (running)\")-1))'"],
        2 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
             "%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_historical.service | grep -c \"active (exited)\")'",
             "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_historical.service | grep -c \"active (running)\")-1))'"],
        3 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
             "%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_middlemanager.service | grep -c \"active (exited)\")'",
             "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_middlemanager.service | grep -c \"active (running)\")-1))'"],
        4 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
             "%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_overlord.service | grep -c \"active (exited)\")'",
             "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_overlord.service | grep -c \"active (running)\")-1))'"]
        }
    dbConfig["basic"]= False
    dbConfig["sequence"]=[0,1,2,3,4]
    dbConfig["include"] = []
    # FIX: this return line was fused with extraction residue in the source,
    # which made the module unparseable.
    return dbConfig
(%%SSH%% not needed) check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!) check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local)) check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local)) check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed) basic -> True/False, if True this is a basic database, so no need to ssh for space checking sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.) include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible! the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args: %%IP%% -> IP of (actual) db vm %%IPgen%% -> IP of (actual) generator vm (on which this script runs) %%IPn%% -> IP of db vm number n (e.g. %%IP2%%) %%IPall%% -> give String with IP of all vms) %%HN%% -> Hostname of (actual) db vm %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs) %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%) %%HNall%% -> give String with Hostname of all vms) %%SSH%% -> if SSH should be used (set at the beginning) Order of Preruns/Postruns: 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. 
preun_skaves/postrun_slaves/check_slaves, 4.prerun_dict/postrun_dict/check_dict General Order: prerun -> check -> ycsb -> postrun | 3,808 | en | 0.817976 |
"""Tensorflow trainer class."""
import logging
import math
import os
from typing import Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import TFPreTrainedModel, shape_list
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput
from .training_args_tf import TFTrainingArguments
logger = logging.getLogger(__name__)
class TFTrainer:
    """Trainer for ``TFPreTrainedModel`` driven by ``TFTrainingArguments``.

    Holds the model, the optional train/eval ``tf.data.Dataset`` inputs, an
    optional metrics callback, and (set up in ``__init__``) the gradient
    accumulator, optimizer, loss, checkpoint manager and summary writer.
    """

    model: TFPreTrainedModel        # the model being trained/evaluated
    args: TFTrainingArguments       # hyper-parameters and distribution strategy
    # Something similar to a PT Dataset.
    # Temporary until there is a framework-agnostic
    # approach for datasets.
    train_dataset: Optional[tf.data.Dataset]
    eval_dataset: Optional[tf.data.Dataset]
    # Optional callback mapping an EvalPrediction to {metric_name: value}.
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
    # When True, evaluation/prediction reports only the loss (used outside
    # this excerpt — confirm against prediction_loop).
    prediction_loss_only: bool
    def __init__(
        self,
        model: TFPreTrainedModel,
        args: TFTrainingArguments,
        train_dataset: Optional[tf.data.Dataset] = None,
        eval_dataset: Optional[tf.data.Dataset] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        prediction_loss_only: bool = False,
    ):
        """Store the training inputs and immediately run the setup phase.

        Args:
            model: the TF model to train and/or evaluate.
            args: training hyper-parameters, including the tf.distribute
                strategy used throughout setup.
            train_dataset: optional training dataset (batched by the trainer).
            eval_dataset: optional evaluation dataset (batched by the trainer).
            compute_metrics: optional callback mapping an ``EvalPrediction``
                to a dict of metric values.
            prediction_loss_only: when True, prediction reports only the loss.
        """
        self.model = model
        self.args = args
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.compute_metrics = compute_metrics
        self.prediction_loss_only = prediction_loss_only
        self.gradient_accumulator = GradientAccumulator()
        # Must run last: it reads all of the attributes assigned above to
        # build datasets, optimizer, loss, checkpoints and summary writer.
        self._setup_training()
    def _setup_training(self) -> None:
        """
        Setup the different steps to train a model:
        - check if all the data are given
        - create the proper strategy
        - create the features
        - prepare the model settings

        The optimizer, loss, checkpoint manager and summary writer are all
        created inside ``args.strategy.scope()``.
        """
        self._prepare_dataset()
        with self.args.strategy.scope():
            self._create_optimizer()
            # Touching ``iterations`` forces the optimizer to create its
            # variables now, while still inside the strategy scope
            # (NOTE(review): presumably so the distribution strategy places
            # them — confirm).
            _ = self.optimizer.iterations
            self._set_loss_and_metric()
            self._create_checkpoint_manager()
            self._create_summary_writer()
def _set_loss_and_metric(self) -> None:
"""
Create the training loss and metric with their name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
try:
self.loss = tf.keras.losses.get(
{
"class_name": self.args.loss_name,
"config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE},
}
)
except TypeError:
self.loss = tf.keras.losses.get(
{"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}}
)
    def _create_summary_writer(self) -> None:
        """
        Create a summary writer to be able to read the logs in Tensorboard.

        The writer targets ``args.logging_dir`` and is stored on
        ``self.writer``.
        """
        self.writer = tf.summary.create_file_writer(self.args.logging_dir)
    def _prepare_dataset(self) -> None:
        """
        Prepare the training, validation and test data.

        Builds the tf.data pipelines, derives ``self.train_steps`` (either
        ``args.max_steps`` or one epoch's worth of batches) and wraps both
        datasets with the configured distribution strategy.
        """
        if self.train_dataset is not None:
            # Count examples by folding over the dataset — a full O(n) pass,
            # but it works when the dataset's cardinality is unknown.
            self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
            if self.args.max_steps > 0:
                self.train_steps = self.args.max_steps
            else:
                self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)
            # Pipeline order matters: cache -> shuffle (buffer covers the
            # whole dataset) -> batch -> prefetch.
            self.train_dataset = (
                self.train_dataset.cache()
                .shuffle(self.num_train_examples)
                .batch(self.args.train_batch_size)
                .prefetch(tf.data.experimental.AUTOTUNE)
            )
            if self.args.max_steps > 0:
                # repeat(-1) = repeat indefinitely; presumably the training
                # loop stops itself after max_steps — confirm in train().
                self.train_dataset = self.train_dataset.repeat(-1)
            self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)
        else:
            self.train_steps = 0
        if self.eval_dataset is not None:
            # Evaluation data is batched but neither shuffled nor repeated.
            self.eval_dataset = (
                self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)
            )
            self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)
def _create_optimizer(self) -> None:
"""
Create the training optimizer with its name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
if self.args.optimizer_name == "adamw":
self.optimizer = create_optimizer(
self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr
)
else:
try:
self.optimizer = tf.keras.optimizers.get(
{
"class_name": self.args.optimizer_name,
"config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon},
}
)
except TypeError:
# This is for the case where the optimizer is not Adam-like such as SGD
self.optimizer = tf.keras.optimizers.get(
{"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}}
)
logger.info("Created an/a {} optimizer".format(self.args.optimizer_name))
def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:
"""
Create a checkpoint manager in order to be able to make the training
fault-tolerant.
Args:
max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.
load_model: if we want to start the training from the latest checkpoint.
"""
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)
if load_model:
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
    @tf.function
    def _evaluate_steps(self, per_replica_features, per_replica_labels):
        """
        Run one evaluation step across all replicas.

        Args:
            per_replica_features: the batched features (per replica).
            per_replica_labels: the batched labels (per replica).

        Returns:
            A ``(reduced_loss, per_replica_logits)`` tuple: the mean loss over
            replicas, and the raw per-replica logits (left un-reduced so the
            caller can gather them).
        """
        # Forward pass on every replica; False is passed for _run_model's
        # `training` flag (inference mode).
        per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
            self._run_model, args=(per_replica_features, per_replica_labels, False)
        )
        try:
            reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
        except ValueError:
            # Some strategy/value combinations reject axis=0; retry without an axis.
            reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
        return reduced_loss, per_replica_logits
    def _prediction_loop(
        self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None
    ) -> PredictionOutput:
        """
        Shared prediction/evaluation loop used by ``evaluate()`` and ``predict()``.

        Iterates over ``dataset``, accumulating logits and label ids into numpy
        arrays (skipped when ``prediction_loss_only`` is truthy), then computes
        metrics via ``self.compute_metrics`` when available.

        NOTE(review): ``loss`` is only bound inside the loop, so an empty
        dataset raises NameError at the ``metrics["eval_loss"]`` line —
        confirm whether empty datasets must be supported.
        """
        logger.info("***** Running %s *****", description)
        logger.info("  Batch size = %d", self.args.eval_batch_size)
        label_ids: np.ndarray = None
        preds: np.ndarray = None
        step: int = 1
        for features, labels in dataset:
            # `step` becomes an int64 tensor here; `step += 1` below keeps it one.
            step = tf.convert_to_tensor(step, dtype=tf.int64)
            loss, logits = self._evaluate_steps(features, labels)
            loss = tf.reduce_mean(loss)
            if not prediction_loss_only:
                if self.args.n_gpu > 1:
                    # Multi-replica path: gather each replica's slice via
                    # `.values` (presumably a per-replica container — TODO confirm).
                    for val in logits.values:
                        if preds is None:
                            preds = val.numpy()
                        else:
                            preds = np.append(preds, val.numpy(), axis=0)
                    for val in labels.values:
                        if label_ids is None:
                            label_ids = val.numpy()
                        else:
                            label_ids = np.append(label_ids, val.numpy(), axis=0)
                else:
                    if preds is None:
                        preds = logits.numpy()
                    else:
                        preds = np.append(preds, logits.numpy(), axis=0)
                    if label_ids is None:
                        label_ids = labels.numpy()
                    else:
                        label_ids = np.append(label_ids, labels.numpy(), axis=0)
            step += 1
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        # `loss` is overwritten each iteration, so this is the LAST batch's loss.
        metrics["eval_loss"] = loss.numpy()
        # Normalize metric names to the eval_ prefix.
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def evaluate(
self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
"""
if eval_dataset is None:
eval_dataset = self.eval_dataset
output = self._prediction_loop(eval_dataset, description="Evaluation")
return output.metrics
    def train(self) -> None:
        """
        Main training loop.

        Resumes from the optimizer's persisted iteration count when a
        checkpoint was restored, then runs epochs of accumulated-gradient
        steps with periodic logging, evaluation and checkpointing driven by
        ``self.args`` (``logging_steps``, ``eval_steps``, ``save_steps``).
        """
        if self.args.debug:
            # Record graph/profile traces so the first step can export them below.
            tf.summary.trace_on(graph=True, profiler=True)
        self.gradient_accumulator.reset()
        # `iterations` is the optimizer's step counter variable; non-zero means
        # a checkpoint was restored in _create_checkpoint_manager.
        iterations = self.optimizer.iterations
        if iterations.numpy() > 0:
            logger.info("Start the training from the last checkpoint")
            start_epoch = (iterations.numpy() // self.train_steps) + 1
        else:
            start_epoch = 1
        tf.summary.experimental.set_step(iterations)
        # With max_steps > 0 the dataset repeats forever, so a single "epoch"
        # bounded by train_steps is used.
        epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", self.num_train_examples)
        logger.info("  Num Epochs = %d", epochs)
        logger.info("  Total optimization steps = %d", self.train_steps)
        for epoch in range(start_epoch, int(epochs + 1)):
            for training_loss in self._training_steps():
                step = iterations.numpy()
                if self.args.debug:
                    with self.writer.as_default():
                        tf.summary.scalar("loss", training_loss, step=step)
                if step == 1 and self.args.debug:
                    # Export the trace captured by trace_on() after the first step.
                    with self.writer.as_default():
                        tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir)
                if self.args.evaluate_during_training and step % self.args.eval_steps == 0:
                    logs = {}
                    results = self.evaluate()
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                    # learning_rate may be a schedule (callable) or a plain variable.
                    if callable(self.optimizer.learning_rate):
                        logs["learning_rate"] = self.optimizer.learning_rate(step).numpy()
                    else:
                        logs["learning_rate"] = self.optimizer.learning_rate.numpy()
                    logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs))
                    with self.writer.as_default():
                        for k, v in logs.items():
                            tf.summary.scalar(k, v, step=step)
                if step % self.args.logging_steps == 0:
                    logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy()))
                if step % self.args.save_steps == 0:
                    ckpt_save_path = self.model.ckpt_manager.save()
                    logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path))
                # End the inner loop once a full epoch's worth of steps has run.
                if step % self.train_steps == 0:
                    break
def _training_steps(self):
"""
Returns a generator over training steps (i.e. parameters update).
"""
for i, loss in enumerate(self._accumulate_next_gradients()):
if i % self.args.gradient_accumulation_steps == 0:
self._apply_gradients()
yield loss
    @tf.function
    def _apply_gradients(self):
        """Applies the accumulated gradients on every replica (cross-replica)."""
        self.args.strategy.experimental_run_v2(self._step)
def _step(self):
"""Applies gradients and resets accumulation."""
gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync
gradients = [
gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients
]
gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
    def _accumulate_next_gradients(self):
        """
        Generator: repeatedly pull the next distributed batch, accumulate its
        gradients, and yield the reduced loss, until the dataset iterator is
        exhausted (``OutOfRangeError``).
        """
        iterator = iter(self.train_dataset)
        # The iterator is captured by the tf.function closure, so the same
        # traced function advances it on every call.
        @tf.function
        def _accumulate_next():
            per_replica_features, per_replica_labels = next(iterator)
            return self._accumulate_gradients(per_replica_features, per_replica_labels)
        while True:
            try:
                yield _accumulate_next()
            except tf.errors.OutOfRangeError:
                break
def _accumulate_gradients(self, per_replica_features, per_replica_labels):
"""Accumulates the gradients across all the replica."""
per_replica_loss = self.args.strategy.experimental_run_v2(
self._forward, args=(per_replica_features, per_replica_labels)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss
def _forward(self, features, labels):
"""Forwards a training example and accumulates the gradients."""
per_example_loss, _ = self._run_model(features, labels, True)
gradients = tf.gradients(per_example_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
self.gradient_accumulator(gradients)
return per_example_loss
    def _run_model(self, features, labels, training):
        """
        Computes the loss of the given features and labels pair.

        Args:
            features: the batched features.
            labels: the batched labels.
            training: run the model in training mode or not.

        Returns:
            A ``(loss, logits)`` tuple.
        """
        # Classification-style models return a tuple whose first element is
        # the logits; other modes use the model output directly.
        if self.args.mode == "text-classification" or self.args.mode == "token-classification":
            logits = self.model(features, training=training)[0]
        else:
            logits = self.model(features, training=training)
        if self.args.mode == "token-classification":
            # Mask out positions labelled -1 (presumably padding/ignored
            # tokens — confirm against the data pipeline) before the loss.
            active_loss = tf.reshape(labels, (-1,)) != -1
            reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
            labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
            loss = self.loss(labels, reduced_logits)
        elif self.args.mode == "question-answering":
            # Average the start- and end-position losses.
            start_loss = self.loss(labels["start_position"], logits[0])
            end_loss = self.loss(labels["end_position"], logits[1])
            loss = (start_loss + end_loss) / 2.0
        else:
            loss = self.loss(labels, logits)
        # Add model-internal (regularization) losses, scaled down by replica count.
        loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)
        return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
Args:
test_dataset: something similar to a PT Dataset. This is just
temporary before to have a framework-agnostic approach for datasets.
"""
test_dataset = test_dataset.batch(self.args.eval_batch_size)
test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)
return self._prediction_loop(test_dataset, description="Prediction")
def save_model(self) -> None:
"""
Save the pretrained model and create a Tensorflow saved model.
"""
logger.info("Saving model in {}".format(self.args.output_dir))
path = os.path.join(self.args.output_dir, "saved_model")
logger.info("Saving model in {}".format(path))
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(self.args.output_dir)
| src/transformers/trainer_tf.py | 17,412 | Accumulates the gradients across all the replica.
Accumulates the gradients from the next element in dataset.
Applies the gradients (cross-replica).
Create a checkpoint manager in order to be able to make the training
fault-tolerant.
Args:
max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.
load_model: if we want to start the training from the latest checkpoint.
Create the training optimizer with its name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
Create a summary writer to be able to read the logs in Tensorboard.
One step evaluation across replica.
Args:
per_replica_features: the batched features.
per_replica_labels: the batched labels.
Returns:
The loss corresponding to the given batch.
Forwards a training example and accumulates the gradients.
Prepare the training, validation and test data.
Computes the loss of the given features and labels pair.
Args:
features: the batched features.
labels: the batched labels.
training: run the model in training mode or not
Create the training loss and metric with their name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
Setup the different steps to train a model:
- check if all the data are given
- create the proper strategy
- create the features
- prepare the model settings
Applies gradients and resets accumulation.
Returns a generator over training steps (i.e. parameters update).
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
Args:
test_dataset: something similar to a PT Dataset. This is just
temporary before to have a framework-agnostic approach for datasets.
Save the pretrained model and create a Tensorflow saved model.
Train method to train the model.
Tensorflow trainer class.
something similar to a PT Dataset. This is just temporary before to have a framework-agnostic approach for datasets. This is for the case where the optimizer is not Adam-like such as SGD | 2,265 | en | 0.859607 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):

    # marker
    # ------
    @property
    def marker(self):
        """
        Marker styling applied to points OUTSIDE the active selection.

        The 'marker' property may be given as an instance of
        plotly.graph_objs.scattergeo.unselected.Marker, or as a dict of
        properties forwarded to the Marker constructor (supported keys:
        ``color``, ``opacity``, ``size`` — each applied only while a
        selection exists).

        Returns
        -------
        plotly.graph_objs.scattergeo.unselected.Marker
        """
        return self['marker']

    @marker.setter
    def marker(self, val):
        self['marker'] = val

    # textfont
    # --------
    @property
    def textfont(self):
        """
        Text-font styling applied to points OUTSIDE the active selection.

        The 'textfont' property may be given as an instance of
        plotly.graph_objs.scattergeo.unselected.Textfont, or as a dict of
        properties forwarded to the Textfont constructor (supported key:
        ``color``, applied only while a selection exists).

        Returns
        -------
        plotly.graph_objs.scattergeo.unselected.Textfont
        """
        return self['textfont']

    @textfont.setter
    def textfont(self, val):
        self['textfont'] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'scattergeo'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        marker
            plotly.graph_objs.scattergeo.unselected.Marker instance
            or dict with compatible properties
        textfont
            plotly.graph_objs.scattergeo.unselected.Textfont
            instance or dict with compatible properties
        """

    def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
        """
        Construct a new Unselected object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scattergeo.Unselected
        marker
            plotly.graph_objs.scattergeo.unselected.Marker instance
            or dict with compatible properties
        textfont
            plotly.graph_objs.scattergeo.unselected.Textfont
            instance or dict with compatible properties

        Returns
        -------
        Unselected
        """
        super(Unselected, self).__init__('unselected')

        # Normalize `arg` into a private dict copy we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Unselected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Unselected"""
            )

        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import and register the per-property validators.
        from plotly.validators.scattergeo import (unselected as v_unselected)

        self._validators['marker'] = v_unselected.MarkerValidator()
        self._validators['textfont'] = v_unselected.TextfontValidator()

        # Explicit keyword arguments take precedence over entries in `arg`.
        for prop, value in (('marker', marker), ('textfont', textfont)):
            fallback = arg.pop(prop, None)
            self[prop] = value if value is not None else fallback

        # Remaining `arg` entries plus extra kwargs go through the generic
        # (possibly skip_invalid) path.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):

    # color
    # -----
    @property
    def color(self):
        """
        Text font color.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba, hsl/hsla or hsv/hsva string
          - A named CSS color
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self['color']

    @color.setter
    def color(self, val):
        self['color'] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on plot.ly for color.

        The 'colorsrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self['colorsrc']

    @colorsrc.setter
    def colorsrc(self, val):
        self['colorsrc'] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family — the typeface applied by the web browser, which can
        only use fonts available on the system it runs on. Provide multiple
        comma-separated families to express preference order. The plotly
        image service supports a fixed set: "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as a
        non-empty string, or a tuple/list/1-d numpy array of such strings.

        Returns
        -------
        str|numpy.ndarray
        """
        return self['family']

    @family.setter
    def family(self, val):
        self['family'] = val

    # familysrc
    # ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on plot.ly for family.

        The 'familysrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self['familysrc']

    @familysrc.setter
    def familysrc(self, val):
        self['familysrc'] = val

    # size
    # ----
    @property
    def size(self):
        """
        Text font size: an int or float in [1, inf], or a tuple/list/1-d
        numpy array of such numbers.

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self['size']

    @size.setter
    def size(self, val):
        self['size'] = val

    # sizesrc
    # -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on plot.ly for size.

        The 'sizesrc' property must be specified as a string or as a
        plotly.grid_objs.Column object.

        Returns
        -------
        str
        """
        return self['sizesrc']

    @sizesrc.setter
    def sizesrc(self, val):
        self['sizesrc'] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'scattergeo'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        colorsrc
            Sets the source reference on plot.ly for color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",,
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on plot.ly for family .
        size
        sizesrc
            Sets the source reference on plot.ly for size .
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Textfont object

        Sets the text font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scattergeo.Textfont
        color, colorsrc, family, familysrc, size, sizesrc
            See the corresponding property docstrings on this class.

        Returns
        -------
        Textfont
        """
        super(Textfont, self).__init__('textfont')

        # Normalize `arg` into a private dict copy we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Textfont"""
            )

        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import and register the per-property validators.
        from plotly.validators.scattergeo import (textfont as v_textfont)

        self._validators['color'] = v_textfont.ColorValidator()
        self._validators['colorsrc'] = v_textfont.ColorsrcValidator()
        self._validators['family'] = v_textfont.FamilyValidator()
        self._validators['familysrc'] = v_textfont.FamilysrcValidator()
        self._validators['size'] = v_textfont.SizeValidator()
        self._validators['sizesrc'] = v_textfont.SizesrcValidator()

        # Explicit keyword arguments take precedence over entries in `arg`.
        for prop, value in (
            ('color', color),
            ('colorsrc', colorsrc),
            ('family', family),
            ('familysrc', familysrc),
            ('size', size),
            ('sizesrc', sizesrc),
        ):
            fallback = arg.pop(prop, None)
            self[prop] = value if value is not None else fallback

        # Remaining `arg` entries plus extra kwargs go through the generic
        # (possibly skip_invalid) path.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as an
        int or float in the interval [0, 10000].

        Returns
        -------
        int|float
        """
        return self['maxpoints']

    @maxpoints.setter
    def maxpoints(self, val):
        self['maxpoints'] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://plot.ly/settings for more details.

        The 'token' property is a string and must be specified as a
        non-empty string.

        Returns
        -------
        str
        """
        return self['token']

    @token.setter
    def token(self, val):
        self['token'] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'scattergeo'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://plot.ly/settings for more
            details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scattergeo.Stream
        maxpoints
            Maximum number of points kept from an incoming stream
            (newest points win).
        token
            The stream id linking this trace to a stream; see
            https://plot.ly/settings.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__('stream')

        # Normalize `arg` into a private dict copy we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Stream
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Stream"""
            )

        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import and register the per-property validators.
        from plotly.validators.scattergeo import (stream as v_stream)

        self._validators['maxpoints'] = v_stream.MaxpointsValidator()
        self._validators['token'] = v_stream.TokenValidator()

        # Explicit keyword arguments take precedence over entries in `arg`.
        for prop, value in (('maxpoints', maxpoints), ('token', token)):
            fallback = arg.pop(prop, None)
            self[prop] = value if value is not None else fallback

        # Remaining `arg` entries plus extra kwargs go through the generic
        # (possibly skip_invalid) path.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):

    # marker
    # ------
    @property
    def marker(self):
        """
        Marker styling applied to the currently selected points.

        The 'marker' property may be given as an instance of
        plotly.graph_objs.scattergeo.selected.Marker, or as a dict of
        properties forwarded to the Marker constructor (supported keys:
        ``color``, ``opacity``, ``size``).

        Returns
        -------
        plotly.graph_objs.scattergeo.selected.Marker
        """
        return self['marker']

    @marker.setter
    def marker(self, val):
        self['marker'] = val

    # textfont
    # --------
    @property
    def textfont(self):
        """
        Text-font styling applied to the currently selected points.

        The 'textfont' property may be given as an instance of
        plotly.graph_objs.scattergeo.selected.Textfont, or as a dict of
        properties forwarded to the Textfont constructor (supported key:
        ``color``).

        Returns
        -------
        plotly.graph_objs.scattergeo.selected.Textfont
        """
        return self['textfont']

    @textfont.setter
    def textfont(self, val):
        self['textfont'] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'scattergeo'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        marker
            plotly.graph_objs.scattergeo.selected.Marker instance
            or dict with compatible properties
        textfont
            plotly.graph_objs.scattergeo.selected.Textfont instance
            or dict with compatible properties
        """

    def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
        """
        Construct a new Selected object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scattergeo.Selected
        marker
            plotly.graph_objs.scattergeo.selected.Marker instance
            or dict with compatible properties
        textfont
            plotly.graph_objs.scattergeo.selected.Textfont instance
            or dict with compatible properties

        Returns
        -------
        Selected
        """
        super(Selected, self).__init__('selected')

        # Normalize `arg` into a private dict copy we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Selected
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Selected"""
            )

        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import and register the per-property validators.
        from plotly.validators.scattergeo import (selected as v_selected)

        self._validators['marker'] = v_selected.MarkerValidator()
        self._validators['textfont'] = v_selected.TextfontValidator()

        # Explicit keyword arguments take precedence over entries in `arg`.
        for prop, value in (('marker', marker), ('textfont', textfont)):
            fallback = arg.pop(prop, None)
            self[prop] = value if value is not None else fallback

        # Remaining `arg` entries plus extra kwargs go through the generic
        # (possibly skip_invalid) path.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color`is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['autocolorscale']
@autocolorscale.setter
def autocolorscale(self, val):
self['autocolorscale'] = val
# cauto
# -----
@property
def cauto(self):
    """
    Determines whether the color domain is computed from the input
    data (here in `marker.color`) or taken from the bounds set in
    `marker.cmin` and `marker.cmax`. Only has an effect when
    `marker.color` is a numerical array. Defaults to `false` when
    `marker.cmin` and `marker.cmax` are set by the user.

    The 'cauto' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["cauto"]

@cauto.setter
def cauto(self, val):
    # Storage and validation are handled by the base data type.
    self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
    """
    Upper bound of the color domain. Only has an effect when
    `marker.color` is a numerical array. The value should use the
    same units as `marker.color`, and when set, `marker.cmin` must be
    set as well.

    The 'cmax' property is a number and may be specified as:
      - An int or float

    Returns
    -------
    int|float
    """
    return self["cmax"]

@cmax.setter
def cmax(self, val):
    # Storage and validation are handled by the base data type.
    self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
    """
    Mid-point of the color domain; `marker.cmin` and/or `marker.cmax`
    are scaled to be equidistant from this point. Only has an effect
    when `marker.color` is a numerical array. The value should use
    the same units as `marker.color`. Has no effect when
    `marker.cauto` is `false`.

    The 'cmid' property is a number and may be specified as:
      - An int or float

    Returns
    -------
    int|float
    """
    return self["cmid"]

@cmid.setter
def cmid(self, val):
    # Storage and validation are handled by the base data type.
    self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
    """
    Lower bound of the color domain. Only has an effect when
    `marker.color` is a numerical array. The value should use the
    same units as `marker.color`, and when set, `marker.cmax` must be
    set as well.

    The 'cmin' property is a number and may be specified as:
      - An int or float

    Returns
    -------
    int|float
    """
    return self["cmin"]

@cmin.setter
def cmin(self, val):
    # Storage and validation are handled by the base data type.
    self["cmin"] = val
# color
# -----
@property
def color(self):
    """
    Sets the marker color. Accepts either a specific color or an
    array of numbers that are mapped onto the colorscale relative to
    the array's max and min values, or relative to `marker.cmin` and
    `marker.cmax` when those are set.

    The 'color' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (e.g. 'aliceblue', 'blue', 'crimson', ...)
      - A number that will be interpreted as a color
        according to scattergeo.marker.colorscale
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["color"]

@color.setter
def color(self, val):
    # Storage and validation are handled by the base data type.
    self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
    """
    Reference to a shared color axis. Shared color axes are
    referenced as "coloraxis", "coloraxis2", "coloraxis3", etc., and
    their settings live in the layout under `layout.coloraxis`,
    `layout.coloraxis2`, etc. Multiple color scales may be linked to
    the same color axis.

    The 'coloraxis' property is an identifier of a particular
    subplot, of type 'coloraxis', that may be specified as the string
    'coloraxis' optionally followed by an integer >= 1
    (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)

    Returns
    -------
    str
    """
    return self["coloraxis"]

@coloraxis.setter
def coloraxis(self, val):
    # Storage and validation are handled by the base data type.
    self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
    """
    The 'colorbar' property is an instance of ColorBar
    that may be specified as:
      - An instance of plotly.graph_objs.scattergeo.marker.ColorBar
      - A dict of string/value properties that will be passed
        to the ColorBar constructor

        Supported dict properties include styling of the bar itself
        (`bgcolor`, `bordercolor`, `borderwidth`, `outlinecolor`,
        `outlinewidth`, `len`, `lenmode`, `thickness`,
        `thicknessmode`, `x`, `xanchor`, `xpad`, `y`, `yanchor`,
        `ypad`), tick configuration (`tick0`, `dtick`, `nticks`,
        `tickmode`, `ticks`, `ticklen`, `tickwidth`, `tickcolor`,
        `tickangle`, `tickfont`, `tickformat`, `tickformatstops`,
        `tickformatstopdefaults`, `tickprefix`, `ticksuffix`,
        `ticktext`, `ticktextsrc`, `tickvals`, `tickvalssrc`,
        `showticklabels`, `showtickprefix`, `showticksuffix`),
        number formatting (`exponentformat`, `separatethousands`,
        `showexponent`), and the title (`title`, plus the deprecated
        `titlefont` and `titleside`).

    Returns
    -------
    plotly.graph_objs.scattergeo.marker.ColorBar
    """
    return self["colorbar"]

@colorbar.setter
def colorbar(self, val):
    # Storage and validation are handled by the base data type.
    self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
    """
    Sets the colorscale. Only has an effect when `marker.color` is a
    numerical array. The colorscale must be an array of arrays, each
    mapping a normalized value to an rgb, rgba, hex, hsl, hsv, or
    named color string. At minimum, mappings for the lowest (0) and
    highest (1) values are required, e.g.
    `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`. To control the
    bounds of the colorscale in color space, use `marker.cmin` and
    `marker.cmax`. Alternatively, `colorscale` may be the name of a
    built-in palette.

    The 'colorscale' property is a colorscale and may be
    specified as:
      - A list of 2-element lists where the first element is the
        normalized color level value (starting at 0 and ending at 1),
        and the second item is a valid color string.
        (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
      - One of the following named colorscales:
            ['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',
            'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
            'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']

    Returns
    -------
    str
    """
    return self["colorscale"]

@colorscale.setter
def colorscale(self, val):
    # Storage and validation are handled by the base data type.
    self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
    """
    Source reference on plot.ly for color.

    The 'colorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["colorsrc"]

@colorsrc.setter
def colorsrc(self, val):
    # Storage and validation are handled by the base data type.
    self["colorsrc"] = val
# gradient
# --------
@property
def gradient(self):
    """
    The 'gradient' property is an instance of Gradient
    that may be specified as:
      - An instance of plotly.graph_objs.scattergeo.marker.Gradient
      - A dict of string/value properties that will be passed
        to the Gradient constructor

        Supported dict properties:

            color
                Final color of the gradient fill: the center
                color for radial, the right for horizontal, or
                the bottom for vertical.
            colorsrc
                Source reference on plot.ly for color.
            type
                Type of gradient used to fill the markers.
            typesrc
                Source reference on plot.ly for type.

    Returns
    -------
    plotly.graph_objs.scattergeo.marker.Gradient
    """
    return self["gradient"]

@gradient.setter
def gradient(self, val):
    # Storage and validation are handled by the base data type.
    self["gradient"] = val
# line
# ----
@property
def line(self):
    """
    The 'line' property is an instance of Line
    that may be specified as:
      - An instance of plotly.graph_objs.scattergeo.marker.Line
      - A dict of string/value properties that will be passed
        to the Line constructor

        Supported dict properties mirror the marker-level color
        options, applied to the line bounding the marker points:
        `autocolorscale`, `cauto`, `cmax`, `cmid`, `cmin`, `color`,
        `coloraxis`, `colorscale`, `colorsrc`, `reversescale`
        (each behaving like its `marker.*` counterpart but driven
        by `marker.line.color` / `marker.line.cmin` /
        `marker.line.cmax`), plus:

            width
                Width (in px) of the lines bounding the marker
                points.
            widthsrc
                Source reference on plot.ly for width.

    Returns
    -------
    plotly.graph_objs.scattergeo.marker.Line
    """
    return self["line"]

@line.setter
def line(self, val):
    # Storage and validation are handled by the base data type.
    self["line"] = val
# opacity
# -------
@property
def opacity(self):
    """
    Sets the marker opacity.

    The 'opacity' property is a number and may be specified as:
      - An int or float in the interval [0, 1]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|float|numpy.ndarray
    """
    return self["opacity"]

@opacity.setter
def opacity(self, val):
    # Storage and validation are handled by the base data type.
    self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
    """
    Source reference on plot.ly for opacity.

    The 'opacitysrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["opacitysrc"]

@opacitysrc.setter
def opacitysrc(self, val):
    # Storage and validation are handled by the base data type.
    self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
    """
    Reverses the color mapping if true. Only has an effect when
    `marker.color` is a numerical array. When true, `marker.cmin`
    corresponds to the last color in the array and `marker.cmax`
    to the first.

    The 'reversescale' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["reversescale"]

@reversescale.setter
def reversescale(self, val):
    # Storage and validation are handled by the base data type.
    self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
    """
    Determines whether a colorbar is displayed for this trace. Only
    has an effect when `marker.color` is a numerical array.

    The 'showscale' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["showscale"]

@showscale.setter
def showscale(self, val):
    # Storage and validation are handled by the base data type.
    self["showscale"] = val
# size
# ----
@property
def size(self):
    """
    Sets the marker size (in px).

    The 'size' property is a number and may be specified as:
      - An int or float in the interval [0, inf]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|float|numpy.ndarray
    """
    return self["size"]

@size.setter
def size(self, val):
    # Storage and validation are handled by the base data type.
    self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
    """
    Minimum size (in px) of the rendered marker points. Only has an
    effect when `marker.size` is set to a numerical array.

    The 'sizemin' property is a number and may be specified as:
      - An int or float in the interval [0, inf]

    Returns
    -------
    int|float
    """
    return self["sizemin"]

@sizemin.setter
def sizemin(self, val):
    # Storage and validation are handled by the base data type.
    self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
    """
    Rule for converting the data in `size` to pixels. Only has an
    effect when `marker.size` is set to a numerical array.

    The 'sizemode' property is an enumeration that may be specified as:
      - One of the following enumeration values:
            ['diameter', 'area']

    Returns
    -------
    Any
    """
    return self["sizemode"]

@sizemode.setter
def sizemode(self, val):
    # Storage and validation are handled by the base data type.
    self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
    """
    Scale factor used to determine the rendered size of marker
    points. Only has an effect when `marker.size` is set to a
    numerical array. Use with `sizemin` and `sizemode`.

    The 'sizeref' property is a number and may be specified as:
      - An int or float

    Returns
    -------
    int|float
    """
    return self["sizeref"]

@sizeref.setter
def sizeref(self, val):
    # Storage and validation are handled by the base data type.
    self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
    """
    Source reference on plot.ly for size.

    The 'sizesrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["sizesrc"]

@sizesrc.setter
def sizesrc(self, val):
    # Storage and validation are handled by the base data type.
    self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
    """
    Sets the marker symbol type. Adding 100 is equivalent to
    appending "-open" to a symbol name. Adding 200 is equivalent to
    appending "-dot" to a symbol name. Adding 300 is equivalent to
    appending "-open-dot" or "dot-open" to a symbol name.

    The 'symbol' property is an enumeration that may be specified as:
      - One of the enumerated base symbols, by number or name
        (e.g. 0 / 'circle', 1 / 'square', 2 / 'diamond',
        3 / 'cross', 4 / 'x', 5 / 'triangle-up', ...,
        'pentagon', 'hexagon', 'octagon', 'star', 'hexagram',
        'star-triangle-up', 'star-square', 'star-diamond',
        'diamond-tall', 'diamond-wide', 'hourglass', 'bowtie',
        'circle-cross', 'circle-x', 'square-cross', 'square-x',
        'diamond-cross', 'diamond-x', 'cross-thin', 'x-thin',
        'asterisk', 'hash', 'y-up', 'y-down', 'y-left', 'y-right',
        'line-ew', 'line-ns', 'line-ne', 'line-nw'), optionally
        with the "-open"/"-dot"/"-open-dot" variants noted above
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["symbol"]

@symbol.setter
def symbol(self, val):
    # Storage and validation are handled by the base data type.
    self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
    """
    Source reference on plot.ly for symbol.

    The 'symbolsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object

    Returns
    -------
    str
    """
    return self["symbolsrc"]

@symbolsrc.setter
def symbolsrc(self, val):
    # Storage and validation are handled by the base data type.
    self["symbolsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
    """Dotted path of this object's parent within the figure hierarchy."""
    return "scattergeo"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Human-readable description of every property this object
    # supports; the returned text is spliced verbatim into the
    # constructor docstring, so its content must not be altered.
    return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if in
            `marker.color`is set to a numerical array. In case
            `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`
            Has an effect only if in `marker.color`is set to a
            numerical array. Defaults to `false` when `marker.cmin`
            and `marker.cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmin` must be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if in `marker.color`is
            set to a numerical array. Value should have the same
            units as in `marker.color`. Has no effect when
            `marker.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmax` must be set as well.
        color
            Sets themarkercolor. It accepts either a specific color
            or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.cmin` and `marker.cmax` if
            set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            plotly.graph_objs.scattergeo.marker.ColorBar instance
            or dict with compatible properties
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.color`is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use`marker.cmin` and `marker.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
            ,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
            arth,Electric,Viridis,Cividis.
        colorsrc
            Sets the source reference on plot.ly for color .
        gradient
            plotly.graph_objs.scattergeo.marker.Gradient instance
            or dict with compatible properties
        line
            plotly.graph_objs.scattergeo.marker.Line instance or
            dict with compatible properties
        opacity
            Sets the marker opacity.
        opacitysrc
            Sets the source reference on plot.ly for opacity .
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.color`is set to a numerical array. If
            true, `marker.cmin` will correspond to the last color
            in the array and `marker.cmax` will correspond to the
            first color.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if in `marker.color`is
            set to a numerical array.
        size
            Sets the marker size (in px).
        sizemin
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the minimum size (in px) of the
            rendered marker points.
        sizemode
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the rule for which the data in
            `size` is converted to pixels.
        sizeref
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the scale factor used to
            determine the rendered size of marker points. Use with
            `sizemin` and `sizemode`.
        sizesrc
            Sets the source reference on plot.ly for size .
        symbol
            Sets the marker symbol type. Adding 100 is equivalent
            to appending "-open" to a symbol name. Adding 200 is
            equivalent to appending "-dot" to a symbol name. Adding
            300 is equivalent to appending "-open-dot" or "dot-
            open" to a symbol name.
        symbolsrc
            Sets the source reference on plot.ly for symbol .
        """
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets themarkercolor. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)', [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
Returns
-------
Marker
"""
super(Marker, self).__init__('marker')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergeo.Marker
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.scattergeo import (marker as v_marker)
# Initialize validators
# ---------------------
self._validators['autocolorscale'] = v_marker.AutocolorscaleValidator()
self._validators['cauto'] = v_marker.CautoValidator()
self._validators['cmax'] = v_marker.CmaxValidator()
self._validators['cmid'] = v_marker.CmidValidator()
self._validators['cmin'] = v_marker.CminValidator()
self._validators['color'] = v_marker.ColorValidator()
self._validators['coloraxis'] = v_marker.ColoraxisValidator()
self._validators['colorbar'] = v_marker.ColorBarValidator()
self._validators['colorscale'] = v_marker.ColorscaleValidator()
self._validators['colorsrc'] = v_marker.ColorsrcValidator()
self._validators['gradient'] = v_marker.GradientValidator()
self._validators['line'] = v_marker.LineValidator()
self._validators['opacity'] = v_marker.OpacityValidator()
self._validators['opacitysrc'] = v_marker.OpacitysrcValidator()
self._validators['reversescale'] = v_marker.ReversescaleValidator()
self._validators['showscale'] = v_marker.ShowscaleValidator()
self._validators['size'] = v_marker.SizeValidator()
self._validators['sizemin'] = v_marker.SizeminValidator()
self._validators['sizemode'] = v_marker.SizemodeValidator()
self._validators['sizeref'] = v_marker.SizerefValidator()
self._validators['sizesrc'] = v_marker.SizesrcValidator()
self._validators['symbol'] = v_marker.SymbolValidator()
self._validators['symbolsrc'] = v_marker.SymbolsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('autocolorscale', None)
self['autocolorscale'
] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop('cauto', None)
self['cauto'] = cauto if cauto is not None else _v
_v = arg.pop('cmax', None)
self['cmax'] = cmax if cmax is not None else _v
_v = arg.pop('cmid', None)
self['cmid'] = cmid if cmid is not None else _v
_v = arg.pop('cmin', None)
self['cmin'] = cmin if cmin is not None else _v
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('coloraxis', None)
self['coloraxis'] = coloraxis if coloraxis is not None else _v
_v = arg.pop('colorbar', None)
self['colorbar'] = colorbar if colorbar is not None else _v
_v = arg.pop('colorscale', None)
self['colorscale'] = colorscale if colorscale is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('gradient', None)
self['gradient'] = gradient if gradient is not None else _v
_v = arg.pop('line', None)
self['line'] = line if line is not None else _v
_v = arg.pop('opacity', None)
self['opacity'] = opacity if opacity is not None else _v
_v = arg.pop('opacitysrc', None)
self['opacitysrc'] = opacitysrc if opacitysrc is not None else _v
_v = arg.pop('reversescale', None)
self['reversescale'] = reversescale if reversescale is not None else _v
_v = arg.pop('showscale', None)
self['showscale'] = showscale if showscale is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizemin', None)
self['sizemin'] = sizemin if sizemin is not None else _v
_v = arg.pop('sizemode', None)
self['sizemode'] = sizemode if sizemode is not None else _v
_v = arg.pop('sizeref', None)
self['sizeref'] = sizeref if sizeref is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
_v = arg.pop('symbol', None)
self['symbol'] = symbol if symbol is not None else _v
_v = arg.pop('symbolsrc', None)
self['symbolsrc'] = symbolsrc if symbolsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Styling of the line drawn for a scattergeo trace."""

    # color
    # -----
    @property
    def color(self):
        """
        Sets the line color.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color

        Returns
        -------
        str
        """
        return self['color']

    @color.setter
    def color(self, val):
        self['color'] = val

    # dash
    # ----
    @property
    def dash(self):
        """
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").

        The 'dash' property is an enumeration that may be specified as:
          - One of the following dash styles:
                ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
          - A string containing a dash length list in pixels or percentages
                (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)

        Returns
        -------
        str
        """
        return self['dash']

    @dash.setter
    def dash(self, val):
        self['dash'] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the line width (in px).

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self['width']

    @width.setter
    def width(self, val):
        self['width'] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'scattergeo'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """

    def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scattergeo.Line
        color
            Sets the line color.
        dash
            Sets the dash style of lines ("solid", "dot", "dash",
            "longdash", "dashdot", "longdashdot", or a dash length
            list in px).
        width
            Sets the line width (in px).

        Returns
        -------
        Line
        """
        super(Line, self).__init__('line')

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Line
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Line"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import validators
        # -----------------
        from plotly.validators.scattergeo import (line as v_line)

        # Initialize validators
        # ---------------------
        self._validators['color'] = v_line.ColorValidator()
        self._validators['dash'] = v_line.DashValidator()
        self._validators['width'] = v_line.WidthValidator()

        # Populate data dict with properties
        # ----------------------------------
        # An explicit keyword argument wins over the matching entry
        # popped from ``arg``.
        for _name, _given in (
            ('color', color), ('dash', dash), ('width', width)
        ):
            _popped = arg.pop(_name, None)
            self[_name] = _given if _given is not None else _popped

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Styling of the hover labels shown for a scattergeo trace."""

    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        more two or more lines

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self['align']

    @align.setter
    def align(self, val):
        self['align'] = val

    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on plot.ly for align .

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self['alignsrc']

    @alignsrc.setter
    def alignsrc(self, val):
        self['alignsrc'] = val

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self['bgcolor']

    @bgcolor.setter
    def bgcolor(self, val):
        self['bgcolor'] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on plot.ly for bgcolor .

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self['bgcolorsrc']

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self['bgcolorsrc'] = val

    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self['bordercolor']

    @bordercolor.setter
    def bordercolor(self, val):
        self['bordercolor'] = val

    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on plot.ly for bordercolor .

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self['bordercolorsrc']

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self['bordercolorsrc'] = val

    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of plotly.graph_objs.scattergeo.hoverlabel.Font
          - A dict of string/value properties that will be passed
            to the Font constructor
            (supported keys: color, colorsrc, family, familysrc,
            size, sizesrc)

        Returns
        -------
        plotly.graph_objs.scattergeo.hoverlabel.Font
        """
        return self['font']

    @font.setter
    def font(self, val):
        self['font'] = val

    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self['namelength']

    @namelength.setter
    def namelength(self, val):
        self['namelength'] = val

    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on plot.ly for namelength .

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self['namelengthsrc']

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self['namelengthsrc'] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'scattergeo'

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs
    ):
        """
        Construct a new Hoverlabel object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.scattergeo.Hoverlabel
        align
            Sets the horizontal alignment of the text content
            within hover label box.
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__('hoverlabel')

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattergeo.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.scattergeo.Hoverlabel"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop('skip_invalid', False)

        # Import validators
        # -----------------
        from plotly.validators.scattergeo import (hoverlabel as v_hoverlabel)

        # Initialize validators
        # ---------------------
        # Every hoverlabel validator class name is simply the
        # capitalized property name followed by "Validator".
        for _name in (
            'align', 'alignsrc', 'bgcolor', 'bgcolorsrc',
            'bordercolor', 'bordercolorsrc', 'font',
            'namelength', 'namelengthsrc',
        ):
            self._validators[_name] = getattr(
                v_hoverlabel, _name.capitalize() + 'Validator'
            )()

        # Populate data dict with properties
        # ----------------------------------
        # An explicit keyword argument wins over the matching entry
        # popped from ``arg``.
        _supplied = dict(
            align=align, alignsrc=alignsrc, bgcolor=bgcolor,
            bgcolorsrc=bgcolorsrc, bordercolor=bordercolor,
            bordercolorsrc=bordercolorsrc, font=font,
            namelength=namelength, namelengthsrc=namelengthsrc,
        )
        for _name, _given in _supplied.items():
            _popped = arg.pop(_name, None)
            self[_name] = _given if _given is not None else _popped

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.graph_objs.scattergeo import unselected
from plotly.graph_objs.scattergeo import selected
from plotly.graph_objs.scattergeo import marker
from plotly.graph_objs.scattergeo import hoverlabel
| plotly/graph_objs/scattergeo/__init__.py | 104,192 | Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Unselected
marker
plotly.graph_objs.scattergeo.unselected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.unselected.Textfont
instance or dict with compatible properties
Returns
-------
Unselected
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Textfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Selected
marker
plotly.graph_objs.scattergeo.selected.Marker instance
or dict with compatible properties
textfont
plotly.graph_objs.scattergeo.selected.Textfont instance
or dict with compatible properties
Returns
-------
Selected
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objs.scattergeo.marker.ColorBar instance
or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color .
gradient
plotly.graph_objs.scattergeo.marker.Gradient instance
or dict with compatible properties
line
plotly.graph_objs.scattergeo.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for symbol .
Returns
-------
Marker
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Line
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scattergeo.Hoverlabel
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color`is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
Sets the upper bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
Sets the lower bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A number that will be interpreted as a color
according to scattergeo.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48".
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/blob/master/READ
ME.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We
add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
plotly.graph_objs.scattergeo.marker.colorbar.Ti
ckformatstop instance or dict with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scattergeo.marker.colorbar.tickformatstopdefa
ults), sets the default property values to use
for elements of
scattergeo.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objs.scattergeo.marker.colorbar.Ti
tle instance or dict with compatible properties
titlefont
Deprecated: Please use
scattergeo.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattergeo.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scattergeo.marker.ColorBar
Sets the colorscale. Has an effect only if in `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['Greys', 'YlGnBu', 'Greens', 'YlOrRd', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
Returns
-------
str
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans", "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.scattergeo.hoverlabel.Font
The 'gradient' property is an instance of Gradient
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.Gradient
- A dict of string/value properties that will be passed
to the Gradient constructor
Supported dict properties:
color
Sets the final color of the gradient fill: the
center color for radial, the right for
horizontal, or the bottom for vertical.
colorsrc
Sets the source reference on plot.ly for color
.
type
Sets the type of gradient used to fill the
markers
typesrc
Sets the source reference on plot.ly for type
.
Returns
-------
plotly.graph_objs.scattergeo.marker.Gradient
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
                Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color
space, use`marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly.graph_objs.scattergeo.marker.Line
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.unselected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
size
Sets the marker size of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattergeo.unselected.Marker
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.selected.Marker
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
plotly.graph_objs.scattergeo.selected.Marker
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
Sets the source reference on plot.ly for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Reverses the color mapping if true. Has an effect only if in
`marker.color`is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color`is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, 'circle', 100, 'circle-open', 200, 'circle-dot', 300,
'circle-open-dot', 1, 'square', 101, 'square-open', 201,
'square-dot', 301, 'square-open-dot', 2, 'diamond', 102,
'diamond-open', 202, 'diamond-dot', 302,
'diamond-open-dot', 3, 'cross', 103, 'cross-open', 203,
'cross-dot', 303, 'cross-open-dot', 4, 'x', 104, 'x-open',
204, 'x-dot', 304, 'x-open-dot', 5, 'triangle-up', 105,
'triangle-up-open', 205, 'triangle-up-dot', 305,
'triangle-up-open-dot', 6, 'triangle-down', 106,
'triangle-down-open', 206, 'triangle-down-dot', 306,
'triangle-down-open-dot', 7, 'triangle-left', 107,
'triangle-left-open', 207, 'triangle-left-dot', 307,
'triangle-left-open-dot', 8, 'triangle-right', 108,
'triangle-right-open', 208, 'triangle-right-dot', 308,
'triangle-right-open-dot', 9, 'triangle-ne', 109,
'triangle-ne-open', 209, 'triangle-ne-dot', 309,
'triangle-ne-open-dot', 10, 'triangle-se', 110,
'triangle-se-open', 210, 'triangle-se-dot', 310,
'triangle-se-open-dot', 11, 'triangle-sw', 111,
'triangle-sw-open', 211, 'triangle-sw-dot', 311,
'triangle-sw-open-dot', 12, 'triangle-nw', 112,
'triangle-nw-open', 212, 'triangle-nw-dot', 312,
'triangle-nw-open-dot', 13, 'pentagon', 113,
'pentagon-open', 213, 'pentagon-dot', 313,
'pentagon-open-dot', 14, 'hexagon', 114, 'hexagon-open',
214, 'hexagon-dot', 314, 'hexagon-open-dot', 15,
'hexagon2', 115, 'hexagon2-open', 215, 'hexagon2-dot',
315, 'hexagon2-open-dot', 16, 'octagon', 116,
'octagon-open', 216, 'octagon-dot', 316,
'octagon-open-dot', 17, 'star', 117, 'star-open', 217,
'star-dot', 317, 'star-open-dot', 18, 'hexagram', 118,
'hexagram-open', 218, 'hexagram-dot', 318,
'hexagram-open-dot', 19, 'star-triangle-up', 119,
'star-triangle-up-open', 219, 'star-triangle-up-dot', 319,
'star-triangle-up-open-dot', 20, 'star-triangle-down',
120, 'star-triangle-down-open', 220,
'star-triangle-down-dot', 320,
'star-triangle-down-open-dot', 21, 'star-square', 121,
'star-square-open', 221, 'star-square-dot', 321,
'star-square-open-dot', 22, 'star-diamond', 122,
'star-diamond-open', 222, 'star-diamond-dot', 322,
'star-diamond-open-dot', 23, 'diamond-tall', 123,
'diamond-tall-open', 223, 'diamond-tall-dot', 323,
'diamond-tall-open-dot', 24, 'diamond-wide', 124,
'diamond-wide-open', 224, 'diamond-wide-dot', 324,
'diamond-wide-open-dot', 25, 'hourglass', 125,
'hourglass-open', 26, 'bowtie', 126, 'bowtie-open', 27,
'circle-cross', 127, 'circle-cross-open', 28, 'circle-x',
128, 'circle-x-open', 29, 'square-cross', 129,
'square-cross-open', 30, 'square-x', 130, 'square-x-open',
31, 'diamond-cross', 131, 'diamond-cross-open', 32,
'diamond-x', 132, 'diamond-x-open', 33, 'cross-thin', 133,
'cross-thin-open', 34, 'x-thin', 134, 'x-thin-open', 35,
'asterisk', 135, 'asterisk-open', 36, 'hash', 136,
'hash-open', 236, 'hash-dot', 336, 'hash-open-dot', 37,
'y-up', 137, 'y-up-open', 38, 'y-down', 138,
'y-down-open', 39, 'y-left', 139, 'y-left-open', 40,
'y-right', 140, 'y-right-open', 41, 'line-ew', 141,
'line-ew-open', 42, 'line-ns', 142, 'line-ns-open', 43,
'line-ne', 143, 'line-ne-open', 44, 'line-nw', 144,
'line-nw-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
Sets the source reference on plot.ly for symbol .
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.unselected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scattergeo.unselected.Textfont
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.scattergeo.selected.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of selected points.
Returns
-------
plotly.graph_objs.scattergeo.selected.Textfont
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
marker ------ textfont -------- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ color ----- colorsrc -------- family ------ familysrc --------- size ---- sizesrc ------- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ maxpoints --------- token ----- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ marker ------ textfont -------- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ autocolorscale -------------- cauto ----- cmax ---- cmid ---- cmin ---- color ----- coloraxis --------- colorbar -------- colorscale ---------- colorsrc -------- gradient -------- line ---- opacity ------- opacitysrc 
---------- reversescale ------------ showscale --------- size ---- sizemin ------- sizemode -------- sizeref ------- sizesrc ------- symbol ------ symbolsrc --------- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ color ----- dash ---- width ----- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ align ----- alignsrc -------- bgcolor ------- bgcolorsrc ---------- bordercolor ----------- bordercolorsrc -------------- font ---- namelength ---------- namelengthsrc ------------- property parent name -------------------- Self properties description --------------------------- Validate arg ------------ Handle skip_invalid ------------------- Import validators ----------------- Initialize validators --------------------- Populate data dict with properties ---------------------------------- Process unknown kwargs ---------------------- Reset skip_invalid ------------------ | 57,532 | en | 0.589549 |
"""MxShop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
import xadmin
from MxShop.settings import MEDIA_ROOT
from django.views.static import serve
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from rest_framework_jwt.views import obtain_jwt_token
from goods.views import GoodsListViewSet, CategoryViewset, HotSearchsViewset, BannerViewset
from goods.views import IndexCategoryViewset
from users.views import SmsCodeViewset, UserViewset
from user_operation.views import UserFavViewset, LeavingMessageViewset, AddressViewset
from trade.views import ShoppingCartViewset, OrderViewset
router = DefaultRouter()
#配置goods的url
router.register(r'goods', GoodsListViewSet, base_name="goods")
#配置category的url
router.register(r'categorys', CategoryViewset, base_name="categorys")
router.register(r'codes', SmsCodeViewset, base_name="codes")
router.register(r'hotsearchs', HotSearchsViewset, base_name="hotsearchs")
router.register(r'users', UserViewset, base_name="users")
#收藏
router.register(r'userfavs', UserFavViewset, base_name="userfavs")
#留言
router.register(r'messages', LeavingMessageViewset, base_name="messages")
#收货地址
router.register(r'address', AddressViewset, base_name="address")
#购物车url
router.register(r'shopcarts', ShoppingCartViewset, base_name="shopcarts")
#订单相关url
router.register(r'orders', OrderViewset, base_name="orders")
#轮播图url
router.register(r'banners', BannerViewset, base_name="banners")
#首页商品系列数据
router.register(r'indexgoods', IndexCategoryViewset, base_name="indexgoods")
goods_list = GoodsListViewSet.as_view({
'get': 'list',
})
from trade.views import AlipayView
from django.views.generic import TemplateView
urlpatterns = [
url(r'^xadmin/', xadmin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
url('', include('social_django.urls', namespace='social')),
url(r'^', include(router.urls)),
url(r'^index/', TemplateView.as_view(template_name="index.html"), name="index"),
url(r'docs/', include_docs_urls(title="慕学生鲜")),
#drf自带的token认证模式
url(r'^api-token-auth/', views.obtain_auth_token),
#jwt的认证接口
url(r'^login/', obtain_jwt_token),
url(r'^alipay/return/', AlipayView.as_view(), name="alipay"),
url(r'^ueditor/',include('DjangoUeditor.urls' )),
]
| MxShop/MxShop/urls.py | 3,226 | MxShop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
from django.contrib import admin配置goods的url配置category的url收藏留言收货地址购物车url订单相关url轮播图url首页商品系列数据drf自带的token认证模式jwt的认证接口 | 750 | en | 0.56614 |
from inspect import isclass
from django.conf import settings
from django.core.files.storage import get_storage_class
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
# Public API of this module.
# NOTE(review): __all__ omits LOG_ADMINS, LOG_EDITORS and LOG_REVIEW_QUEUE,
# which are also defined at module level below -- confirm this is intentional.
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
    """Abstract base for all activity-log action types.

    Concrete subclasses set ``id`` (a unique numeric identifier) and
    ``format`` (a localizable message template).  Optional marker
    attributes such as ``keep``, ``review_queue``, ``admin_event`` and
    ``editor_event`` are detected via ``hasattr()`` by the LOG_* lists
    built at the bottom of this module.
    """
    action_class = None
# Each concrete log class below carries:
#   id           -- stable numeric id persisted in the database (never reuse)
#   format       -- localized message template filled in with log arguments
#   action_class -- optional CSS class used when rendering the entry
# Optional marker attributes (keep, review_queue, review_email_user,
# editor_event, admin_event, hide_developer) are detected with hasattr(),
# so merely defining them -- whatever the value -- switches the behaviour on.
# NOTE: ids are not sequential (e.g. 42 follows 21); only uniqueness matters.
class CREATE_ADDON(_LOG):
    id = 1
    action_class = 'add'
    format = _(u'{addon} was created.')
    keep = True
class EDIT_PROPERTIES(_LOG):
    """ Expects: addon """
    id = 2
    action_class = 'edit'
    format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
    id = 3
    action_class = 'edit'
    format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
    id = 4
    action_class = 'edit'
    format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
    id = 5
    action_class = 'add'
    # L10n: {0} is the user being added, {1} is their role.
    format = _(u'{0.name} ({1}) added to {addon}.')
    keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
    id = 6
    action_class = 'delete'
    # L10n: {0} is the user being removed, {1} is their role.
    format = _(u'{0.name} ({1}) removed from {addon}.')
    keep = True
class EDIT_CONTRIBUTIONS(_LOG):
    id = 7
    action_class = 'edit'
    format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
    id = 8
    format = _(u'{addon} disabled.')
    keep = True
class USER_ENABLE(_LOG):
    id = 9
    format = _(u'{addon} enabled.')
    keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
    id = 10
    format = _(u'Stats set public for {addon}.')
    keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
    id = 11
    format = _(u'{addon} stats set to private.')
    keep = True
class CHANGE_STATUS(_LOG):
    id = 12
    # L10n: {0} is the status
    format = _(u'{addon} status changed to {0}.')
    keep = True
class ADD_PREVIEW(_LOG):
    id = 13
    action_class = 'add'
    format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
    id = 14
    action_class = 'edit'
    format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
    id = 15
    action_class = 'delete'
    format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
    id = 16
    action_class = 'add'
    format = _(u'{version} added to {addon}.')
    keep = True
class EDIT_VERSION(_LOG):
    id = 17
    action_class = 'edit'
    format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
    id = 18
    action_class = 'delete'
    # Note, {0} is a string not a version since the version is deleted.
    # L10n: {0} is the version number
    format = _(u'Version {0} deleted from {addon}.')
    keep = True
class ADD_FILE_TO_VERSION(_LOG):
    id = 19
    action_class = 'add'
    format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
    """
    Expecting: addon, filename, version
    Because the file is being deleted, filename and version
    should be strings and not the object.
    """
    id = 20
    action_class = 'delete'
    format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
    id = 21
    action_class = 'approve'
    format = _(u'{addon} {version} approved.')
    short = _(u'Approved')
    keep = True
    review_email_user = True
    review_queue = True
class PRELIMINARY_VERSION(_LOG):
    id = 42
    action_class = 'approve'
    format = _(u'{addon} {version} given preliminary review.')
    short = _(u'Preliminarily approved')
    keep = True
    review_email_user = True
    review_queue = True
class REJECT_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 43
    action_class = 'reject'
    format = _(u'{addon} {version} rejected.')
    short = _(u'Rejected')
    keep = True
    review_email_user = True
    review_queue = True
class RETAIN_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 22
    format = _(u'{addon} {version} retained.')
    short = _(u'Retained')
    keep = True
    review_email_user = True
    review_queue = True
class ESCALATE_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 23
    format = _(u'{addon} {version} escalated.')
    short = _(u'Escalated')
    keep = True
    review_email_user = True
    review_queue = True
class REQUEST_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 24
    format = _(u'{addon} {version} review requested.')
    short = _(u'Review requested')
    keep = True
    review_email_user = True
    review_queue = True
class REQUEST_INFORMATION(_LOG):
    id = 44
    format = _(u'{addon} {version} more information requested.')
    short = _(u'More information requested')
    keep = True
    review_email_user = True
    review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
    id = 45
    format = _(u'{addon} {version} super review requested.')
    short = _(u'Super review requested')
    keep = True
    review_queue = True
class COMMENT_VERSION(_LOG):
    id = 49
    format = _(u'Comment on {addon} {version}.')
    short = _(u'Comment')
    keep = True
    review_queue = True
    # Presumably hides the entry from the developer's feed -- confirm where
    # hide_developer is consumed.
    hide_developer = True
class ADD_TAG(_LOG):
    id = 25
    action_class = 'tag'
    format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
    id = 26
    action_class = 'tag'
    format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
    id = 27
    action_class = 'collection'
    format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
    id = 28
    action_class = 'collection'
    format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
    id = 29
    action_class = 'review'
    format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
    id = 31
    action_class = 'edit'
    # L10n: {0} is a category name.
    format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
    id = 32
    action_class = 'edit'
    # L10n: {0} is a category name.
    format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
    id = 33
    format = _(u'{addon} is now featured.')
    keep = True
class REMOVE_RECOMMENDED(_LOG):
    id = 34
    format = _(u'{addon} is no longer featured.')
    keep = True
class ADD_APPVERSION(_LOG):
    id = 35
    action_class = 'add'
    # L10n: {0} is the application, {1} is the version of the app
    format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
    """ Expects: author.user, role, addon """
    id = 36
    # L10n: {0} is a user, {1} is their role
    format = _(u'{0.name} role changed to {1} for {addon}.')
    keep = True
class CHANGE_LICENSE(_LOG):
    """ Expects: license, addon """
    id = 37
    action_class = 'edit'
    format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
    id = 38
    action_class = 'edit'
    format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
    id = 39
    action_class = 'edit'
    format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
    id = 40
    action_class = 'approve'
    format = _(u'{review} for {addon} approved.')
    # editor_format is the variant shown to editors; editor_event marks the
    # entry for the LOG_EDITORS list below.
    editor_format = _(u'{user} approved {review} for {addon}.')
    keep = True
    editor_event = True
class DELETE_REVIEW(_LOG):
    """Requires review.id and add-on objects."""
    id = 41
    action_class = 'review'
    format = _(u'Review {0} for {addon} deleted.')
    editor_format = _(u'{user} deleted {0} for {addon}.')
    keep = True
    editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
    id = 46
    format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
    id = 47
    format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
    id = 130
    format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
    id = 48
    format = _(u'Password changed.')
class MAKE_PREMIUM(_LOG):
    id = 50
    format = _(u'{addon} changed to premium.')
class MANIFEST_UPDATED(_LOG):
    id = 52
    format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
    id = 53
    action_class = 'approve'
    format = _(u'{addon} {version} approved but waiting to be made public.')
    short = _(u'Approved but waiting')
    keep = True
    review_email_user = True
    review_queue = True
class PURCHASE_ADDON(_LOG):
    id = 54
    format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
    id = 55
    format = _(u'{addon} installed.')
class REFUND_REQUESTED(_LOG):
    id = 56
    format = _(u'Refund requested for {addon}')
class REFUND_DECLINED(_LOG):
    id = 57
    format = _(u'Refund declined for {addon} for {0}.')
class REFUND_GRANTED(_LOG):
    id = 58
    format = _(u'Refund granted for {addon} for {0}.')
class REFUND_INSTANT(_LOG):
    id = 59
    format = _(u'Instant refund granted for {addon}.')
class USER_EDITED(_LOG):
    id = 60
    format = _(u'Account updated.')
class RECEIPT_CHECKED(_LOG):
    id = 65
    format = _(u'Valid receipt was checked for {addon}.')
class ESCALATION_CLEARED(_LOG):
    id = 66
    format = _(u'Escalation cleared for {addon}.')
    short = _(u'Escalation cleared')
    keep = True
    review_queue = True
class APP_DISABLED(_LOG):
    id = 67
    format = _(u'{addon} disabled.')
    short = _(u'App disabled')
    keep = True
    review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
    id = 68
    format = _(u'{addon} escalated because of high number of abuse reports.')
    short = _(u'High Abuse Reports')
    keep = True
    review_queue = True
class ESCALATED_HIGH_REFUNDS(_LOG):
    id = 69
    format = _(u'{addon} escalated because of high number of refund requests.')
    short = _(u'High Refund Requests')
    keep = True
    review_queue = True
class REREVIEW_MANIFEST_CHANGE(_LOG):
    id = 70
    format = _(u'{addon} re-reviewed because of manifest change.')
    short = _(u'Manifest Change')
    keep = True
    review_queue = True
class REREVIEW_PREMIUM_TYPE_UPGRADE(_LOG):
    id = 71
    format = _(u'{addon} re-reviewed because app upgraded premium type.')
    short = _(u'Premium Type Upgrade')
    keep = True
    review_queue = True
class REREVIEW_CLEARED(_LOG):
    id = 72
    format = _(u'Re-review cleared for {addon}.')
    short = _(u'Re-review cleared')
    keep = True
    review_queue = True
class ESCALATE_MANUAL(_LOG):
    id = 73
    format = _(u'{addon} escalated by reviewer.')
    short = _(u'Reviewer escalation')
    keep = True
    review_queue = True
# TODO(robhudson): Escalation log for editor escalation..
class VIDEO_ERROR(_LOG):
    id = 74
    format = _(u'Video removed from {addon} because of a problem with '
               'the video. ')
    short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
    id = 75
    format = _(u'{addon} re-review because of new device(s) added.')
    short = _(u'Device(s) Added')
    keep = True
    review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
    id = 76
    format = _(u'{addon} device support manually changed by reviewer.')
    short = _(u'Device(s) Changed by Reviewer')
    keep = True
    review_queue = True
class WEBAPP_RESUBMIT(_LOG):
    id = 77
    format = _(u'{addon} resubmitted for review.')
    short = _(u'App Resubmission')
    keep = True
    review_queue = True
class ESCALATION_VIP_APP(_LOG):
    id = 78
    # NOTE(review): "its" should read "it's", but changing the string would
    # invalidate existing translations -- fix together with a string update.
    format = _(u'{addon} auto-escalated because its a VIP app.')
    short = _(u'VIP auto-escalation')
    keep = True
    review_queue = True
class REREVIEW_MANIFEST_URL_CHANGE(_LOG):
    id = 79
    format = _(u'{addon} re-reviewed because of manifest URL change.')
    short = _(u'Manifest URL Change')
    keep = True
    review_queue = True
class ESCALATION_PRERELEASE_APP(_LOG):
    id = 80
    # NOTE(review): same "its" / "it's" typo as ESCALATION_VIP_APP.
    format = _(u'{addon} auto-escalated because its a prerelease app.')
    short = _(u'Prerelease auto-escalation')
    keep = True
    review_queue = True
# CUSTOM_TEXT and CUSTOM_HTML intentionally use plain (non-localized)
# pass-through templates.
class CUSTOM_TEXT(_LOG):
    id = 98
    format = '{0}'
class CUSTOM_HTML(_LOG):
    id = 99
    format = '{0}'
class OBJECT_ADDED(_LOG):
    id = 100
    format = _(u'Created: {0}.')
    admin_event = True
class OBJECT_EDITED(_LOG):
    id = 101
    format = _(u'Edited field: {2} set to: {0}.')
    admin_event = True
class OBJECT_DELETED(_LOG):
    id = 102
    format = _(u'Deleted: {1}.')
    admin_event = True
class ADMIN_USER_EDITED(_LOG):
    id = 103
    format = _(u'User {user} edited, reason: {1}')
    admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
    id = 104
    format = _(u'User {user} anonymized.')
    admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
    id = 105
    format = _(u'User {user} restricted.')
    admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
    id = 106
    format = _(u'Admin {0} viewed activity log for {user}.')
    admin_event = True
class EDIT_REVIEW(_LOG):
    id = 107
    action_class = 'review'
    format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
    id = 108
    action_class = 'review'
    format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
    id = 120
    action_class = 'access'
    format = _(u'User {0.name} added to {group}.')
    keep = True
    admin_event = True
class GROUP_USER_REMOVED(_LOG):
    id = 121
    action_class = 'access'
    format = _(u'User {0.name} removed from {group}.')
    keep = True
    admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
    id = 122
    format = _(u'{addon} minimum requirements manually changed by reviewer.')
    short = _(u'Requirements Changed by Reviewer')
    keep = True
    review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
    id = 123
    format = _(u'{addon} minimum requirements manually changed.')
    short = _(u'Requirements Changed')
    keep = True
    review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
    id = 124
    # L10n: {0} is the status
    format = _(u'{version} status changed to {0}.')
    keep = True
class DELETE_USER_LOOKUP(_LOG):
    id = 125
    # L10n: {0.name} is the user's name, {0.id} their id.
    format = _(u'User {0.name} {0.id} deleted via lookup tool.')
    keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
class PRIORITY_REVIEW_REQUESTED(_LOG):
id = 128
format = _(u'Priority review requested for {addon}.')
short = _(u'Priority Review')
keep = True
review_queue = True
# Collect every _LOG subclass declared above (excluding the base itself)
# and derive the lookup tables used throughout the app.
LOGS = [obj for obj in vars().values()
        if isclass(obj) and issubclass(obj, _LOG) and obj != _LOG]
LOG_BY_ID = {cls.id: cls for cls in LOGS}
LOG = AttributeDict((cls.__name__, cls) for cls in LOGS)
LOG_ADMINS = [cls.id for cls in LOGS if hasattr(cls, 'admin_event')]
LOG_KEEP = [cls.id for cls in LOGS if hasattr(cls, 'keep')]
LOG_EDITORS = [cls.id for cls in LOGS if hasattr(cls, 'editor_event')]
LOG_REVIEW_QUEUE = [cls.id for cls in LOGS if hasattr(cls, 'review_queue')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [cls.id for cls in LOGS
                         if hasattr(cls, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [cls.id for cls in LOGS
                      if (getattr(cls, 'hide_developer', False)
                          or cls.id in LOG_ADMINS)]
def log(action, *args, **kw):
    """Create and persist an ActivityLog entry for ``action``.

    e.g. amo.log(amo.LOG.CREATE_ADDON, []),
         amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)

    Positional args may be model instances (Webapp, Version, UserProfile,
    Group) or ``(model_class, pk)`` tuples; each is indexed into the
    matching *Log join table. Recognized keywords: ``user`` (defaults to
    the current request user), ``details``, ``created``, ``attachments``.
    Returns the saved ActivityLog, or None if no user is available.
    """
    # Imports are deferred to avoid circular imports at module load time.
    from amo import get_user, logger_log
    from mkt.developers.models import (ActivityLog, ActivityLogAttachment,
                                       AppLog, CommentLog, GroupLog, UserLog,
                                       VersionLog)
    from mkt.access.models import Group
    from mkt.webapps.models import Webapp
    from mkt.users.models import UserProfile
    from mkt.versions.models import Version
    user = kw.get('user', get_user())
    if not user:
        # Without a user there is nothing to attribute the action to.
        logger_log.warning('Activity log called with no user: %s' % action.id)
        return
    al = ActivityLog(user=user, action=action.id)
    al.arguments = args
    if 'details' in kw:
        al.details = kw['details']
    al.save()
    if 'details' in kw and 'comments' in al.details:
        CommentLog(comments=al.details['comments'], activity_log=al).save()
    # TODO(davedash): post-remora this may not be necessary.
    if 'created' in kw:
        al.created = kw['created']
        # Double save necessary since django resets the created date on save.
        al.save()
    if 'attachments' in kw:
        # ``attachments`` is expected to be a formset whose cleaned_data
        # carries 'attachment' and 'description' — TODO confirm with callers.
        formset = kw['attachments']
        storage = get_storage_class()()
        for form in formset:
            data = form.cleaned_data
            if 'attachment' in data:
                attachment = data['attachment']
                storage.save('%s/%s' % (settings.REVIEWER_ATTACHMENTS_PATH,
                                        attachment.name), attachment)
                ActivityLogAttachment(activity_log=al,
                                      description=data['description'],
                                      mimetype=attachment.content_type,
                                      filepath=attachment.name).save()
    for arg in args:
        # (model_class, pk) tuples are indexed by id without fetching rows.
        if isinstance(arg, tuple):
            if arg[0] == Webapp:
                AppLog(addon_id=arg[1], activity_log=al).save()
            elif arg[0] == Version:
                VersionLog(version_id=arg[1], activity_log=al).save()
            elif arg[0] == UserProfile:
                UserLog(user_id=arg[1], activity_log=al).save()
            elif arg[0] == Group:
                GroupLog(group_id=arg[1], activity_log=al).save()
        # Webapp first since Webapp subclasses Addon.
        if isinstance(arg, Webapp):
            AppLog(addon=arg, activity_log=al).save()
        elif isinstance(arg, Version):
            VersionLog(version=arg, activity_log=al).save()
        elif isinstance(arg, UserProfile):
            # Index by any user who is mentioned as an argument.
            UserLog(activity_log=al, user=arg).save()
        elif isinstance(arg, Group):
            GroupLog(group=arg, activity_log=al).save()
    # Index by every user
    UserLog(activity_log=al, user=user).save()
    return al
| apps/amo/log.py | 18,434 | Expects: license, addon
Expects: author.user, role, addon
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
Requires review.id and add-on objects.
Expects: addon
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
L10n: {0} is the user being removed, {1} is their role. TODO(davedash): Log these types when pages are present TODO(davedash): Log these types when pages are present L10n: {0} is the status Note, {0} is a string not a version since the version is deleted. L10n: {0} is the version number takes add-on, version, reviewtype takes add-on, version, reviewtype takes add-on, version, reviewtype takes add-on, version, reviewtype TODO(davedash): Add these when we do the admin site L10n: {0} is a category name. L10n: {0} is a category name. L10n: {0} is the application, {1} is the version of the app L10n: {0} is a user, {1} is their role TODO(robhudson): Escalation log for editor escalation.. L10n: {0} is the status L10n: {0} is the status Is the user emailed the message? Logs *not* to show to the developer. TODO(davedash): post-remora this may not be necessary. Double save necessary since django resets the created date on save. Webapp first since Webapp subclasses Addon. Index by any user who is mentioned as an argument. Index by every user | 1,390 | en | 0.849065 |
# -*- coding: latin-1 -*-
#
# Turn off logging in extensions (too loud!)
from vb2py.test.testframework import *
import vb2py.extensions
import vb2py.utils
vb2py.extensions.disableLogging()
from vb2py.vbparser import buildParseTree, VBParserError
#
# Set some config options which are appropriate for testing
import vb2py.config
Config = vb2py.config.VB2PYConfig()
Config.setLocalOveride("General", "ReportPartialConversion", "No")
# VB.NET snippets that must parse without error; each entry becomes one
# test method on ParsingTest below.
tests = []

# String methods
tests.extend([
    'a = "hello".Length',
    'a = ("hello").Length',
    'a = ("hello" + "world").Length',
    'a = ("hello" + "world").Length + 2',
])

# Expression calls
tests.extend([
    'a = (a + b).Truncate(2)',
    '(a + b).SendToDestination("email.com")',
    '(a + b).SendToDestination',
])

# Function using VB.NET's Return statement
tests.append(
    """
Function B()
Return 12
End Function
"""
)

# Property with Get/Set accessors
tests.append((
    """
Public Class MyObject
Public Property A As Integer
Get
Return 10
End Get
Set(Value as Integer)
X = Value
End Set
End Property
End Class
"""
))

# VB.NET
tests.append("""
Class MyClass
A = 1
End Class
""")

# Decorated Class (attribute on the same line, then with a line continuation)
tests.append("""
<Decorator.Thing()> Class MyClass
A = 1
End Class
""")

tests.append("""
<Decorator.Thing()> _
Class MyClass
A = 1
End Class
""")

# Handlers
tests.append("""
Class MyClass
Public Sub DoIt() Handles Button.Click
End Sub
End Class
""")

# Shared methods
tests.append("""
Class MyClass
Public Shared Sub DoIt()
End Sub
Public Shared Function DoIt()
End Function
End Class
""")

# Module with a public constant
tests.append("""
Module Digests
Public Const a = ""
End Module
""")
class ParsingTest(unittest.TestCase):
    """Empty holder class; test methods are attached to it dynamically.

    Each snippet in ``tests`` becomes a ``test<N>`` method via the
    setattr loop at the bottom of this module.
    """
def getTestMethod(vb):
    """Create a test method that asserts *vb* parses as VB.NET.

    The returned function is attached to ParsingTest below so that each
    snippet in ``tests`` becomes an individual unittest test case.
    """
    def testMethod(self):
        try:
            buildParseTree(vb, dialect='vb.net')
        except VBParserError as err:
            # Chain the parser error so the original failure details are
            # not lost in the test report (previously it was swallowed).
            raise Exception("Unable to parse ...\n%s" % vb) from err
    return testMethod
# Add tests to main test class: one test method per snippet, named test0..N.
for idx in range(len(tests)):
    setattr(ParsingTest, "test%d" % idx, getTestMethod(tests[idx]))

# ``main`` comes from the testframework star-import at the top of the file.
if __name__ == "__main__":
    main()
| vb2py/test/testdotnet.py | 2,212 | Holder class which gets built into a whole test case
Create a test method
-*- coding: latin-1 -*- Turn off logging in extensions (too loud!) Set some config options which are appropriate for testing String methods Expression calls VB.NET Decorated Class Handlers Shared methods Add tests to main test class | 308 | en | 0.830207 |
# -*- coding: utf-8 -*-
"""
pyvisa-py.protocols.usbtmc
~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements Session to control USBTMC instruments
Loosely based on PyUSBTMC:python module to handle USB-TMC(Test and
Measurement class) devices.
by Noboru Yamamot, Accl. Lab, KEK, JAPAN
This file is an offspring of the Lantz Project.
:copyright: 2014-2018 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import enum
from pyvisa.compat import struct
import time
from collections import namedtuple
import warnings
import usb
from .usbutil import (find_devices, find_interfaces, find_endpoint,
usb_find_desc)
import sys
# ``array.array`` renamed ``tostring`` to ``tobytes`` in Python 3.2;
# expose one helper with the modern semantics on every version.
if sys.version_info >= (3, 2):
    def array_to_bytes(arr):
        """Serialize an array.array to bytes."""
        return arr.tobytes()
else:
    def array_to_bytes(arr):
        """Serialize an array.array to bytes (pre-3.2 method name)."""
        return arr.tostring()
class MsgID(enum.IntEnum):
    """USBTMC MsgID values identifying bulk transfer message types.

    From USB-TMC table2. Note that ``dev_dep_msg_in`` aliases
    ``request_dev_dep_msg_in`` (both 2) and ``vendor_specific_in`` aliases
    ``request_vendor_specific_in`` (both 127): the same code is used on the
    OUT request and the matching IN response.
    """
    dev_dep_msg_out = 1
    request_dev_dep_msg_in = 2
    dev_dep_msg_in = 2
    vendor_specific_out = 126
    request_vendor_specific_in = 127
    vendor_specific_in = 127
    trigger = 128  # for USB488
class Request(enum.IntEnum):
    """bRequest codes for USBTMC class-specific control transfers.

    Used as the request argument of ``usb_dev.ctrl_transfer`` below.
    """
    initiate_abort_bulk_out = 1
    check_abort_bulk_out_status = 2
    initiate_abort_bulk_in = 3
    check_abort_bulk_in_status = 4
    initiate_clear = 5
    check_clear_status = 6
    get_capabilities = 7
    indicator_pulse = 64
class UsbTmcStatus(enum.IntEnum):
    """USBTMC_status byte returned as the first byte of control responses.

    Checked against ``data[0]`` in the abort handshake below.
    """
    success = 1
    pending = 2
    failed = 0x80
    transfer_not_in_progress = 0x81
    split_not_in_progress = 0x82
    split_in_progress = 0x83
def find_tmc_devices(vendor=None, product=None, serial_number=None,
                     custom_match=None, **kwargs):
    """Find connected USBTMC devices. See usbutil.find_devices for more info.
    """
    def is_usbtmc(dev):
        # Honor the caller's extra predicate first.
        if custom_match and not custom_match(dev):
            return False
        # USBTMC devices expose an interface with class 0xFE, subclass 3.
        tmc_interfaces = find_interfaces(dev, bInterfaceClass=0xfe,
                                         bInterfaceSubClass=3)
        return bool(tmc_interfaces)

    return find_devices(vendor, product, serial_number, is_usbtmc, **kwargs)
class BulkOutMessage(object):
    """The Host uses the Bulk-OUT endpoint to send USBTMC command messages to
    the device.
    """

    @staticmethod
    def build_array(btag, eom, chunk):
        """Frame *chunk* as a DEV_DEP_MSG_OUT bulk message.

        The 12-byte header carries the message id, the transfer tag (and
        its complement) and the payload length; the payload is then padded
        with NULs to a 4-byte boundary.
        """
        length = len(chunk)
        header = struct.pack('BBBx', MsgID.dev_dep_msg_out, btag,
                             ~btag & 0xFF)
        header += struct.pack("<LBxxx", length, eom)
        padding = b'\0' * ((4 - length) % 4)
        return header + chunk + padding
class BulkInMessage(namedtuple('BulkInMessage', 'msgid btag btaginverse '
                               'transfer_size transfer_attributes data')):
    """The Host uses the Bulk-IN endpoint to read USBTMC response messages from
    the device.

    The Host must first send a USBTMC command message that expects a response
    before attempting to read a USBTMC response message.
    """

    @classmethod
    def from_bytes(cls, data):
        """Parse a raw Bulk-IN transfer into a BulkInMessage."""
        msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
        if msgid != MsgID.dev_dep_msg_in:
            # Some devices reply with a malformed header; fall back to a
            # best-effort parse instead of failing outright.
            warnings.warn('Unexpected MsgID format. Consider updating the device\'s firmware. See https://github.com/pyvisa/pyvisa-py/issues/20')
            return BulkInMessage.from_quirky(data)
        transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
        # Truncate data to the specified length (discard padding).
        data = data[12:12+transfer_size]
        return cls(msgid, btag, btaginverse, transfer_size,
                   transfer_attributes, data)

    @classmethod
    def from_quirky(cls, data):
        """Constructs a correct response for quirky devices"""
        msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
        # Strip NUL padding before inspecting the payload.
        data = data.rstrip(b'\x00')
        # check whether it contains a ';' and if throw away the first 12 bytes
        # NOTE(review): ';' here presumably marks a complete SCPI response
        # with a real header — confirm against the quirky devices targeted.
        if ';' in str(data):
            transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
            data = data[12:]
        else:
            transfer_size = 0
            transfer_attributes = 1
        return cls(msgid, btag, btaginverse, transfer_size, transfer_attributes, data)

    @staticmethod
    def build_array(btag, transfer_size, term_char=None):
        """Build a REQUEST_DEV_DEP_MSG_IN message asking for up to
        *transfer_size* bytes.

        :param transfer_size: maximum number of bytes the device may send
        :param btag: transfer tag (1-255), complemented in the header
        :param term_char: optional termination character; when given, the
            TermCharEnabled attribute bit is set
        :return: framed request as bytes
        """
        if term_char is None:
            transfer_attributes = 0
            term_char = 0
        else:
            transfer_attributes = 2
        return (struct.pack('BBBx', MsgID.request_dev_dep_msg_in, btag,
                            ~btag & 0xFF) +
                struct.pack("<LBBxx", transfer_size, transfer_attributes,
                            term_char))
class USBRaw(object):
    """Base class for drivers that communicate with instruments
    via usb port using pyUSB
    """

    #: Configuration number to be used. If None, the default will be used.
    CONFIGURATION = None

    #: Interface index to be used.
    INTERFACE = (0, 0)

    #: Receive and Send endpoints to be used. If None the first IN (or OUT)
    #: BULK endpoint will be used.
    ENDPOINTS = (None, None)

    find_devices = staticmethod(find_devices)

    def __init__(self, vendor=None, product=None, serial_number=None,
                 device_filters=None, timeout=None, **kwargs):
        """Locate exactly one matching USB device and claim its interface.

        Raises ValueError if zero or more than one device matches.
        """
        super(USBRaw, self).__init__()

        # Timeout expressed in ms as an integer and limited to 2**32-1
        # If left to None pyusb will use its default value
        self.timeout = timeout

        device_filters = device_filters or {}
        devices = list(self.find_devices(vendor, product, serial_number, None,
                                         **device_filters))

        if not devices:
            raise ValueError('No device found.')
        elif len(devices) > 1:
            desc = '\n'.join(str(dev) for dev in devices)
            raise ValueError('{} devices found:\n{}\nPlease narrow the search'
                             ' criteria'.format(len(devices), desc))

        self.usb_dev = devices[0]

        # Detach the kernel driver so user space can claim the interface;
        # failure is tolerated (e.g. platforms without kernel drivers).
        try:
            if self.usb_dev.is_kernel_driver_active(0):
                self.usb_dev.detach_kernel_driver(0)
        except (usb.core.USBError, NotImplementedError) as e:
            pass

        try:
            self.usb_dev.set_configuration()
        except usb.core.USBError as e:
            raise Exception('failed to set configuration\n %s' % e)

        # Best effort: not all devices support altsetting selection.
        try:
            self.usb_dev.set_interface_altsetting()
        except usb.core.USBError as e:
            pass

        self.usb_intf = self._find_interface(self.usb_dev, self.INTERFACE)

        self.usb_recv_ep, self.usb_send_ep =\
            self._find_endpoints(self.usb_intf, self.ENDPOINTS)

    def _find_interface(self, dev, setting):
        """Return the interface descriptor selected by INTERFACE."""
        return self.usb_dev.get_active_configuration()[self.INTERFACE]

    def _find_endpoints(self, interface, setting):
        """Resolve the (recv, send) endpoints, defaulting to the first
        BULK IN/OUT endpoints when not pinned by ``setting``."""
        recv, send = setting
        if recv is None:
            recv = find_endpoint(interface, usb.ENDPOINT_IN,
                                 usb.ENDPOINT_TYPE_BULK)
        else:
            recv = usb_find_desc(interface, bEndpointAddress=recv)

        if send is None:
            send = find_endpoint(interface, usb.ENDPOINT_OUT,
                                 usb.ENDPOINT_TYPE_BULK)
        else:
            send = usb_find_desc(interface, bEndpointAddress=send)

        return recv, send

    def write(self, data):
        """Send raw bytes to the instrument.

        :param data: bytes to be sent to the instrument
        :type data: bytes
        """
        try:
            return self.usb_send_ep.write(data)
        except usb.core.USBError as e:
            raise ValueError(str(e))

    def read(self, size):
        """Receive raw bytes to the instrument.

        :param size: number of bytes to receive
        :return: received bytes
        :return type: bytes
        """
        if size <= 0:
            size = 1

        data = array_to_bytes(self.usb_recv_ep.read(size, self.timeout))

        return data

    def close(self):
        """Release all pyusb resources held for this device."""
        return usb.util.dispose_resources(self.usb_dev)
class USBTMC(USBRaw):
    """USB Test & Measurement Class transport.

    Frames raw reads/writes into USBTMC Bulk-OUT/Bulk-IN messages and
    performs the abort handshake when a Bulk-IN transfer fails.
    """

    # Maximum number of bytes per transfer (for sending and receiving).
    RECV_CHUNK = 1024 ** 2

    find_devices = staticmethod(find_tmc_devices)

    def __init__(self, vendor=None, product=None, serial_number=None,
                 **kwargs):
        super(USBTMC, self).__init__(vendor, product, serial_number, **kwargs)
        self.usb_intr_in = find_endpoint(self.usb_intf, usb.ENDPOINT_IN,
                                         usb.ENDPOINT_TYPE_INTERRUPT)

        self.usb_dev.reset()
        self.usb_dev.set_configuration()

        # Give the device a moment to settle after reset/reconfiguration.
        time.sleep(0.01)

        self._get_capabilities()

        # bTag cycles through 1..255 and tags every bulk transfer.
        self._btag = 0

        if not (self.usb_recv_ep and self.usb_send_ep):
            msg = "TMC device must have both Bulk-In and Bulk-out endpoints."
            raise ValueError(msg)

    def _get_capabilities(self):
        """Issue the GET_CAPABILITIES class request (response is ignored)."""
        self.usb_dev.ctrl_transfer(
            usb.util.build_request_type(usb.util.CTRL_IN,
                                        usb.util.CTRL_TYPE_CLASS,
                                        usb.util.CTRL_RECIPIENT_INTERFACE),
            Request.get_capabilities,
            0x0000,
            self.usb_intf.index,
            0x0018,
            timeout=self.timeout)

    def _find_interface(self, dev, setting):
        """Return the first USBTMC interface (class 0xFE, subclass 3)."""
        interfaces = find_interfaces(dev, bInterfaceClass=0xFE,
                                     bInterfaceSubClass=3)
        if not interfaces:
            raise ValueError('USB TMC interface not found.')
        elif len(interfaces) > 1:
            # More than one TMC interface: silently use the first one.
            pass

        return interfaces[0]

    def _abort_bulk_in(self, btag):
        """Request that the device abort a pending Bulk-IN operation."""
        abort_timeout_ms = 5000

        # Send INITIATE_ABORT_BULK_IN.
        # According to USBTMC 1.00 4.2.1.4:
        #   wValue = bTag value of transfer to be aborted
        #   wIndex = Bulk-IN endpoint
        #   wLength = 0x0002 (length of device response)
        data = self.usb_dev.ctrl_transfer(
            usb.util.build_request_type(usb.util.CTRL_IN,
                                        usb.util.CTRL_TYPE_CLASS,
                                        usb.util.CTRL_RECIPIENT_ENDPOINT),
            Request.initiate_abort_bulk_in,
            btag,
            self.usb_recv_ep.bEndpointAddress,
            0x0002,
            timeout=abort_timeout_ms)

        if data[0] != UsbTmcStatus.success:
            # Abort Bulk-IN failed. Ignore it.
            return

        # Read remaining data from Bulk-IN endpoint.
        self.usb_recv_ep.read(self.RECV_CHUNK, abort_timeout_ms)

        # Send CHECK_ABORT_BULK_IN_STATUS until it completes.
        # According to USBTMC 1.00 4.2.1.5:
        #   wValue = 0x0000
        #   wIndex = Bulk-IN endpoint
        #   wLength = 0x0008 (length of device response)
        for retry in range(100):
            data = self.usb_dev.ctrl_transfer(
                usb.util.build_request_type(usb.util.CTRL_IN,
                                            usb.util.CTRL_TYPE_CLASS,
                                            usb.util.CTRL_RECIPIENT_ENDPOINT),
                Request.check_abort_bulk_in_status,
                0x0000,
                self.usb_recv_ep.bEndpointAddress,
                0x0008,
                timeout=abort_timeout_ms)
            if data[0] != UsbTmcStatus.pending:
                break
            time.sleep(0.05)

    def write(self, data):
        """Send raw bytes to the instrument.

        :param data: bytes to be sent to the instrument
        :type data: bytes
        :return: number of payload bytes accepted (``len(data)``)
        """
        begin, end, size = 0, 0, len(data)

        raw_write = super(USBTMC, self).write

        # Send all data via one or more Bulk-OUT transfers.
        # Set the EOM flag on the last transfer only.
        # Send at least one transfer (possibly empty).
        while (end == 0) or (end < size):
            # BUGFIX: advance the window by exactly one chunk per pass.
            # The previous code computed ``end = begin + RECV_CHUNK`` (which
            # emitted an empty frame every other iteration) and rebound
            # ``data`` to the framed message, so any payload larger than
            # RECV_CHUNK was corrupted from the second chunk onwards.
            begin, end = end, end + self.RECV_CHUNK

            self._btag = (self._btag % 255) + 1

            eom = (end >= size)
            msg = BulkOutMessage.build_array(self._btag, eom, data[begin:end])

            raw_write(msg)

        return size

    def read(self, size):
        """Read up to *size* bytes via one or more Bulk-IN transfers."""
        recv_chunk = self.RECV_CHUNK
        if size > 0 and size < recv_chunk:
            recv_chunk = size

        header_size = 12
        max_padding = 511

        eom = False

        raw_read = super(USBTMC, self).read
        raw_write = super(USBTMC, self).write

        received = bytearray()

        while not eom:
            self._btag = (self._btag % 255) + 1

            req = BulkInMessage.build_array(self._btag, recv_chunk, None)

            raw_write(req)

            try:
                resp = raw_read(recv_chunk + header_size + max_padding)
                response = BulkInMessage.from_bytes(resp)
            except (usb.core.USBError, ValueError):
                # Abort failed Bulk-IN operation.
                self._abort_bulk_in(self._btag)
                raise

            received.extend(response.data)

            # Detect EOM only when device sends all expected bytes.
            if len(response.data) >= response.transfer_size:
                eom = response.transfer_attributes & 1

        return bytes(received)
| pyvisa-py/protocols/usbtmc.py | 13,564 | The Host uses the Bulk-IN endpoint to read USBTMC response messages from
the device.
The Host must first send a USBTMC command message that expects a response
before attempting to read a USBTMC response message.
The Host uses the Bulk-OUT endpoint to send USBTMC command messages to
the device.
From USB-TMC table2
Base class for drivers that communicate with instruments
via usb port using pyUSB
Request that the device abort a pending Bulk-IN operation.
:param transfer_size:
:param btag:
:param term_char:
:return:
Find connected USBTMC devices. See usbutil.find_devices for more info.
Constructs a correct response for quirky devices
Receive raw bytes to the instrument.
:param size: number of bytes to receive
:return: received bytes
:return type: bytes
Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
pyvisa-py.protocols.usbtmc
~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements Session to control USBTMC instruments
Loosely based on PyUSBTMC:python module to handle USB-TMC(Test and
Measurement class) devices.
by Noboru Yamamot, Accl. Lab, KEK, JAPAN
This file is an offspring of the Lantz Project.
:copyright: 2014-2018 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
-*- coding: utf-8 -*- for USB488 Truncate data to the specified length (discard padding). check whether it contains a ';' and if throw away the first 12 bytes: Configuration number to be used. If None, the default will be used.: Interface index it be used: Receive and Send endpoints to be used. If None the first IN (or OUT): BULK endpoint will be used. Timeout expressed in ms as an integer and limited to 2**32-1 If left to None pyusb will use its default value Maximum number of bytes per transfer (for sending and receiving). Send INITIATE_ABORT_BULK_IN. According to USBTMC 1.00 4.2.1.4: wValue = bTag value of transfer to be aborted wIndex = Bulk-IN endpoint wLength = 0x0002 (length of device response) Abort Bulk-IN failed. Ignore it. Read remaining data from Bulk-IN endpoint. Send CHECK_ABORT_BULK_IN_STATUS until it completes. According to USBTMC 1.00 4.2.1.5: wValue = 0x0000 wIndex = Bulk-IN endpoint wLength = 0x0008 (length of device response) Send all data via one or more Bulk-OUT transfers. Set the EOM flag on the last transfer only. Send at least one transfer (possibly empty). Abort failed Bulk-IN operation. Detect EOM only when device sends all expected bytes. | 2,583 | en | 0.797806 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
class TestV1ScaleIOVolumeSource(unittest.TestCase):
    """Unit test stubs for the generated V1ScaleIOVolumeSource model."""

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1ScaleIOVolumeSource(self):
        """Placeholder test for constructing a V1ScaleIOVolumeSource."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_scale_io_volume_source.V1ScaleIOVolumeSource()
        pass
# Allow running this generated test module directly.
if __name__ == '__main__':
    unittest.main()
| kubernetes/test/test_v1_scale_io_volume_source.py | 1,006 | V1ScaleIOVolumeSource unit test stubs
Test V1ScaleIOVolumeSource
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 FIXME: construct object with mandatory attributes with example valuesmodel = kubernetes.client.models.v1_scale_io_volume_source.V1ScaleIOVolumeSource() | 444 | en | 0.489993 |
import datetime
import json
import os
import re
import time
import uuid
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union
from urllib.parse import urljoin
# if simplejson is installed, `requests` defaults to using it instead of json
# this allows the client to gracefully handle either json or simplejson
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
import pendulum
import toml
from slugify import slugify
import prefect
from prefect.utilities.exceptions import (
AuthorizationError,
ClientError,
VersionLockError,
)
from prefect.utilities.graphql import (
EnumValue,
GraphQLResult,
compress,
parse_graphql,
with_args,
)
from prefect.utilities.logging import create_diagnostic_logger
if TYPE_CHECKING:
from prefect.core import Flow
import requests
# Any value representable in JSON (used for GraphQL params/variables).
JSONLike = Union[bool, dict, list, str, int, float, None]

# type definitions for GraphQL results

# Row-shaped result describing a single task run; ``state`` is a
# prefect State instance (forward-referenced to avoid an import cycle).
TaskRunInfoResult = NamedTuple(
    "TaskRunInfoResult",
    [
        ("id", str),
        ("task_id", str),
        ("task_slug", str),
        ("version", int),
        ("state", "prefect.engine.state.State"),
    ],
)

# Row-shaped result describing a flow run and its task runs.
FlowRunInfoResult = NamedTuple(
    "FlowRunInfoResult",
    [
        ("id", str),
        ("name", str),
        ("flow_id", str),
        ("parameters", Dict[str, Any]),
        ("context", Dict[str, Any]),
        ("version", int),
        ("scheduled_start_time", datetime.datetime),
        ("state", "prefect.engine.state.State"),
        ("task_runs", List[TaskRunInfoResult]),
    ],
)
class Client:
"""
Client for communication with Prefect Cloud
If the arguments aren't specified the client initialization first checks the prefect
configuration and if the server is not set there it checks the current context. The
token will only be present in the current context.
Args:
- api_server (str, optional): the URL to send all GraphQL requests
to; if not provided, will be pulled from `cloud.graphql` config var
- api_token (str, optional): a Prefect Cloud API token, taken from
`config.cloud.auth_token` if not provided. If this token is USER-scoped, it may
be used to log in to any tenant that the user is a member of. In that case,
ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself
will be used as authorization.
"""
    def __init__(self, api_server: str = None, api_token: str = None):
        # Ephemeral JWT state, refreshed as needed for USER-scoped tokens.
        self._access_token = None
        self._refresh_token = None
        self._access_token_expires_at = pendulum.now()
        self._active_tenant_id = None
        self._attached_headers = {}  # type: Dict[str, str]
        self.logger = create_diagnostic_logger("Diagnostics")

        # store api server
        self.api_server = api_server or prefect.context.config.cloud.get("graphql")

        # store api token
        self._api_token = api_token or prefect.context.config.cloud.get(
            "auth_token", None
        )

        if prefect.config.backend == "cloud":
            if not self._api_token:
                # if no api token was passed, attempt to load state from local storage
                settings = self._load_local_settings()
                self._api_token = settings.get("api_token")
                if self._api_token:
                    self._active_tenant_id = settings.get("active_tenant_id")
                if self._active_tenant_id:
                    try:
                        self.login_to_tenant(tenant_id=self._active_tenant_id)
                    except AuthorizationError:
                        # if an authorization error is raised, then the token is
                        # invalid and should be cleared
                        self.logout_from_tenant()
        else:
            # TODO: Separate put this functionality and clean up initial tenant access handling
            # Server backend: there is a single default tenant — query it.
            if not self._active_tenant_id:
                tenant_info = self.graphql({"query": {"tenant": {"id"}}})
                if tenant_info.data.tenant:
                    self._active_tenant_id = tenant_info.data.tenant[0].id
def create_tenant(self, name: str, slug: str = None) -> str:
"""
Creates a new tenant.
Note this route only works when run against Prefect Server.
Args:
- name (str): the name of the tenant to create
- slug (str, optional): the slug of the tenant to create; defaults to name
Returns:
- str: the ID of the newly created tenant, or the ID of the currently active tenant
Raises:
- ValueError: if run against Prefect Cloud
"""
if prefect.config.backend != "server":
msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/"
raise ValueError(msg)
if slug is None:
slug = slugify(name)
tenant_info = self.graphql(
{
"mutation($input: create_tenant_input!)": {
"create_tenant(input: $input)": {"id"}
}
},
variables=dict(input=dict(name=name, slug=slug)),
)
return tenant_info.data.create_tenant.id
# -------------------------------------------------------------------------
# Utilities
def get(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and GET request
Args:
- path (str): the path of the API url. For example, to GET
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the GET request to;
defaults to `self.api_server`
- headers (dict, optional): Headers to pass with the request
- params (dict): GET parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="GET",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def post(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and POST request
Args:
- path (str): the path of the API url. For example, to POST
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the POST request to;
defaults to `self.api_server`
- headers(dict): headers to pass with the request
- params (dict): POST parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="POST",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
    def graphql(
        self,
        query: Any,
        raise_on_error: bool = True,
        headers: Dict[str, str] = None,
        variables: Dict[str, JSONLike] = None,
        token: str = None,
        retry_on_api_error: bool = True,
    ) -> GraphQLResult:
        """
        Convenience function for running queries against the Prefect GraphQL API

        Args:
            - query (Any): A representation of a graphql query to be executed. It will be
                parsed by prefect.utilities.graphql.parse_graphql().
            - raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL
                returns any `errors`.
            - headers (dict): any additional headers that should be passed as part of the
                request
            - variables (dict): Variables to be filled into a query with the key being
                equivalent to the variables that are accepted by the query
            - token (str): an auth token. If not supplied, the `client.access_token` is used.
            - retry_on_api_error (bool): whether the operation should be retried if the API returns
                an API_ERROR code

        Returns:
            - dict: Data returned from the GraphQL query

        Raises:
            - ClientError if there are errors raised by the GraphQL mutation
        """
        # GraphQL queries are POSTed to the API root with the query text and
        # JSON-encoded variables as parameters.
        result = self.post(
            path="",
            server=self.api_server,
            headers=headers,
            params=dict(query=parse_graphql(query), variables=json.dumps(variables)),
            token=token,
            retry_on_api_error=retry_on_api_error,
        )

        if raise_on_error and "errors" in result:
            # Classify errors: auth problems first, then version-lock
            # conflicts, and everything else as a generic ClientError.
            if "UNAUTHENTICATED" in str(result["errors"]):
                raise AuthorizationError(result["errors"])
            elif "Malformed Authorization header" in str(result["errors"]):
                raise AuthorizationError(result["errors"])
            elif (
                result["errors"][0].get("extensions", {}).get("code")
                == "VERSION_LOCKING_ERROR"
            ):
                raise VersionLockError(result["errors"])
            raise ClientError(result["errors"])
        else:
            return GraphQLResult(result)  # type: ignore
    def _send_request(
        self,
        session: "requests.Session",
        method: str,
        url: str,
        params: Dict[str, JSONLike] = None,
        headers: dict = None,
    ) -> "requests.models.Response":
        """Execute one HTTP request on *session* and return the response.

        Supports GET/POST/DELETE; raises ValueError for other methods and
        requests.HTTPError for non-success status codes. When
        ``cloud.diagnostics`` is enabled, logs redacted headers, the
        payload, the response and the request duration.
        """
        if prefect.context.config.cloud.get("diagnostics") is True:
            self.logger.debug(f"Preparing request to {url}")
            # Redact bearer tokens before logging headers.
            clean_headers = {
                head: re.sub("Bearer .*", "Bearer XXXX", val)
                for head, val in headers.items()  # type: ignore
            }
            self.logger.debug(f"Headers: {clean_headers}")
            self.logger.debug(f"Request: {params}")
            start_time = time.time()

        if method == "GET":
            response = session.get(url, headers=headers, params=params, timeout=30)
        elif method == "POST":
            # POST bodies are sent as JSON, not form-encoded.
            response = session.post(url, headers=headers, json=params, timeout=30)
        elif method == "DELETE":
            response = session.delete(url, headers=headers, timeout=30)
        else:
            raise ValueError("Invalid method: {}".format(method))

        if prefect.context.config.cloud.get("diagnostics") is True:
            end_time = time.time()
            self.logger.debug(f"Response: {response.json()}")
            self.logger.debug(
                f"Request duration: {round(end_time - start_time, 4)} seconds"
            )

        # Check if request returned a successful status
        response.raise_for_status()

        return response
def _request(
self,
method: str,
path: str,
params: Dict[str, JSONLike] = None,
server: str = None,
headers: dict = None,
token: str = None,
retry_on_api_error: bool = True,
) -> "requests.models.Response":
"""
Runs any specified request (GET, POST, DELETE) against the server
Args:
- method (str): The type of request to be made (GET, POST, DELETE)
- path (str): Path of the API URL
- params (dict, optional): Parameters used for the request
- server (str, optional): The server to make requests against, base API
server is used if not specified
- headers (dict, optional): Headers to pass with the request
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- requests.models.Response: The response returned from the request
Raises:
- ClientError: if the client token is not in the context (due to not being logged in)
- ValueError: if a method is specified outside of the accepted GET, POST, DELETE
- requests.HTTPError: if a status code is returned that is not `200` or `401`
"""
if server is None:
server = self.api_server
assert isinstance(server, str) # mypy assert
if token is None:
token = self.get_auth_token()
# 'import requests' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import requests
url = urljoin(server, path.lstrip("/")).rstrip("/")
params = params or {}
headers = headers or {}
if token:
headers["Authorization"] = "Bearer {}".format(token)
headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__)
if self._attached_headers:
headers.update(self._attached_headers)
session = requests.Session()
retry_total = 6 if prefect.config.backend == "cloud" else 1
retries = requests.packages.urllib3.util.retry.Retry(
total=retry_total,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=["DELETE", "GET", "POST"],
)
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries))
response = self._send_request(
session=session, method=method, url=url, params=params, headers=headers
)
# parse the response
try:
json_resp = response.json()
except JSONDecodeError as exc:
if prefect.config.backend == "cloud" and "Authorization" not in headers:
raise ClientError(
"Malformed response received from Cloud - please ensure that you "
"have an API token properly configured."
) from exc
else:
raise ClientError("Malformed response received from API.") from exc
# check if there was an API_ERROR code in the response
if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error:
success, retry_count = False, 0
# retry up to six times
while success is False and retry_count < 6:
response = self._send_request(
session=session,
method=method,
url=url,
params=params,
headers=headers,
)
if "API_ERROR" in str(response.json().get("errors")):
retry_count += 1
time.sleep(0.25 * (2 ** (retry_count - 1)))
else:
success = True
return response
def attach_headers(self, headers: dict) -> None:
"""
Set headers to be attached to this Client
Args:
- headers (dict): A dictionary of headers to attach to this client. These headers
get added on to the existing dictionary of headers.
"""
self._attached_headers.update(headers)
# -------------------------------------------------------------------------
# Auth
# -------------------------------------------------------------------------
@property
def _local_settings_path(self) -> Path:
"""
Returns the local settings directory corresponding to the current API servers
"""
path = "{home}/client/{server}".format(
home=prefect.context.config.home_dir,
server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"),
)
return Path(os.path.expanduser(path)) / "settings.toml"
def _save_local_settings(self, settings: dict) -> None:
"""
Writes settings to local storage
"""
self._local_settings_path.parent.mkdir(exist_ok=True, parents=True)
with self._local_settings_path.open("w+") as f:
toml.dump(settings, f)
def _load_local_settings(self) -> dict:
"""
Loads settings from local storage
"""
if self._local_settings_path.exists():
with self._local_settings_path.open("r") as f:
return toml.load(f) # type: ignore
return {}
def save_api_token(self) -> None:
"""
Saves the API token in local storage.
"""
settings = self._load_local_settings()
settings["api_token"] = self._api_token
self._save_local_settings(settings)
def get_auth_token(self) -> str:
"""
Returns an auth token:
- if no explicit access token is stored, returns the api token
- if there is an access token:
- if there's a refresh token and the access token expires in the next 30 seconds,
then we refresh the access token and store the result
- return the access token
Returns:
- str: the access token
"""
if not self._access_token:
return self._api_token
expiration = self._access_token_expires_at or pendulum.now()
if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
self._refresh_access_token()
return self._access_token
def get_available_tenants(self) -> List[Dict]:
"""
Returns a list of available tenants.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- List[Dict]: a list of dictionaries containing the id, slug, and name of
available tenants
"""
result = self.graphql(
{"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
# use the API token to see all available tenants
token=self._api_token,
) # type: ignore
return result.data.tenant # type: ignore
    def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool:
        """
        Log in to a specific tenant

        NOTE: this should only be called by users who have provided a USER-scoped API token.

        Args:
            - tenant_slug (str): the tenant's slug
            - tenant_id (str): the tenant's id

        Returns:
            - bool: True if the login was successful

        Raises:
            - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided
            - ValueError: if the `tenant_id` is not a valid UUID
            - ValueError: if no matching tenants are found
        """
        if tenant_slug is None and tenant_id is None:
            raise ValueError(
                "At least one of `tenant_slug` or `tenant_id` must be provided."
            )
        elif tenant_id:
            # validate the id locally so a malformed value fails before any API call
            try:
                uuid.UUID(tenant_id)
            except ValueError as exc:
                raise ValueError("The `tenant_id` must be a valid UUID.") from exc
        # resolve the tenant by slug and/or id; unset variables are null in the filter
        tenant = self.graphql(
            {
                "query($slug: String, $id: uuid)": {
                    "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"}
                }
            },
            variables=dict(slug=tenant_slug, id=tenant_id),
            # use the API token to query the tenant
            token=self._api_token,
        )  # type: ignore
        if not tenant.data.tenant:  # type: ignore
            raise ValueError("No matching tenants found.")
        tenant_id = tenant.data.tenant[0].id  # type: ignore
        # only the Cloud backend issues tenant-scoped JWTs on a tenant switch
        if prefect.config.backend == "cloud":
            payload = self.graphql(
                {
                    "mutation($input: switch_tenant_input!)": {
                        "switch_tenant(input: $input)": {
                            "access_token",
                            "expires_at",
                            "refresh_token",
                        }
                    }
                },
                variables=dict(input=dict(tenant_id=tenant_id)),
                # Use the API token to switch tenants
                token=self._api_token,
            )  # type: ignore
            # store the new tenant-scoped token triple on the client
            self._access_token = payload.data.switch_tenant.access_token  # type: ignore
            self._access_token_expires_at = pendulum.parse(  # type: ignore
                payload.data.switch_tenant.expires_at  # type: ignore
            )  # type: ignore
            self._refresh_token = payload.data.switch_tenant.refresh_token  # type: ignore
        self._active_tenant_id = tenant_id
        # save the tenant setting
        settings = self._load_local_settings()
        settings["active_tenant_id"] = self._active_tenant_id
        self._save_local_settings(settings)
        return True
def logout_from_tenant(self) -> None:
self._access_token = None
self._refresh_token = None
self._active_tenant_id = None
# remove the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = None
self._save_local_settings(settings)
def _refresh_access_token(self) -> bool:
"""
Refresh the client's JWT access token.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- bool: True if the refresh succeeds
"""
payload = self.graphql(
{
"mutation($input: refresh_token_input!)": {
"refresh_token(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(access_token=self._access_token)),
# pass the refresh token as the auth header
token=self._refresh_token,
) # type: ignore
self._access_token = payload.data.refresh_token.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.refresh_token.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore
return True
# -------------------------------------------------------------------------
# Actions
# -------------------------------------------------------------------------
    def register(
        self,
        flow: "Flow",
        project_name: str = None,
        build: bool = True,
        set_schedule_active: bool = True,
        version_group_id: str = None,
        compressed: bool = True,
        no_url: bool = False,
    ) -> str:
        """
        Push a new flow to Prefect Cloud

        Args:
            - flow (Flow): a flow to register
            - project_name (str, optional): the project that should contain this flow.
            - build (bool, optional): if `True`, the flow's environment is built
                prior to serialization; defaults to `True`
            - set_schedule_active (bool, optional): if `False`, will set the schedule to
                inactive in the database to prevent auto-scheduling runs (if the Flow has a
                schedule). Defaults to `True`. This can be changed later.
            - version_group_id (str, optional): the UUID version group ID to use for versioning
                this Flow in Cloud; if not provided, the version group ID associated with this
                Flow's project and name will be used.
            - compressed (bool, optional): if `True`, the serialized flow will be; defaults to
                `True` compressed
            - no_url (bool, optional): if `True`, the stdout from this function will not
                contain the URL link to the newly-registered flow in the Cloud UI

        Returns:
            - str: the ID of the newly-registered flow

        Raises:
            - ClientError: if the register failed
            - TypeError: if `project_name` is not provided
            - ValueError: if the named project does not exist, or the serialized flow
                fails a round-trip deserialization check
        """
        # a scheduled flow with required parameters is only registrable if every
        # clock supplies defaults for all required parameters
        required_parameters = {p for p in flow.parameters() if p.required}
        if flow.schedule is not None and required_parameters:
            required_names = {p.name for p in required_parameters}
            if not all(
                [
                    required_names <= set(c.parameter_defaults.keys())
                    for c in flow.schedule.clocks
                ]
            ):
                raise ClientError(
                    "Flows with required parameters can not be scheduled automatically."
                )
        # keyed edges pass data between tasks; warn if no result handler is set
        if any(e.key for e in flow.edges) and flow.result is None:
            warnings.warn(
                "No result handler was specified on your Flow. Cloud features such as "
                "input caching and resuming task runs from failure may not work properly.",
                stacklevel=2,
            )
        # choose the mutation up front based on whether the payload is compressed
        if compressed:
            create_mutation = {
                "mutation($input: create_flow_from_compressed_string_input!)": {
                    "create_flow_from_compressed_string(input: $input)": {"id"}
                }
            }
        else:
            create_mutation = {
                "mutation($input: create_flow_input!)": {
                    "create_flow(input: $input)": {"id"}
                }
            }
        project = None
        if project_name is None:
            raise TypeError(
                "'project_name' is a required field when registering a flow."
            )
        # resolve the project name to its id
        query_project = {
            "query": {
                with_args("project", {"where": {"name": {"_eq": project_name}}}): {
                    "id": True
                }
            }
        }
        project = self.graphql(query_project).data.project  # type: ignore
        if not project:
            raise ValueError(
                "Project {} not found. Run `prefect create project '{}'` to create it.".format(
                    project_name, project_name
                )
            )
        serialized_flow = flow.serialize(build=build)  # type: Any
        # Set Docker storage image in environment metadata if provided
        if isinstance(flow.storage, prefect.environments.storage.Docker):
            flow.environment.metadata["image"] = flow.storage.name
            # re-serialize (without rebuilding) so the metadata change is captured
            serialized_flow = flow.serialize(build=False)
        # If no image ever set, default metadata to all_extras image on current version
        if not flow.environment.metadata.get("image"):
            version = prefect.__version__.split("+")[0]
            flow.environment.metadata[
                "image"
            ] = f"prefecthq/prefect:all_extras-{version}"
            serialized_flow = flow.serialize(build=False)
        # verify that the serialized flow can be deserialized
        try:
            prefect.serialization.flow.FlowSchema().load(serialized_flow)
        except Exception as exc:
            raise ValueError(
                "Flow could not be deserialized successfully. Error was: {}".format(
                    repr(exc)
                )
            ) from exc
        if compressed:
            serialized_flow = compress(serialized_flow)
        res = self.graphql(
            create_mutation,
            variables=dict(
                input=dict(
                    project_id=(project[0].id if project else None),
                    serialized_flow=serialized_flow,
                    set_schedule_active=set_schedule_active,
                    version_group_id=version_group_id,
                )
            ),
            # flow creation is not idempotent, so never auto-retry this mutation
            retry_on_api_error=False,
        )  # type: Any
        # the response shape depends on which mutation was used
        flow_id = (
            res.data.create_flow_from_compressed_string.id
            if compressed
            else res.data.create_flow.id
        )
        if not no_url:
            # Generate direct link to Cloud flow
            flow_url = self.get_cloud_url("flow", flow_id)
            prefix = "└── "
            print("Flow URL: {}".format(flow_url))
            # Extra information to improve visibility
            msg = (
                f" {prefix}ID: {flow_id}\n"
                f" {prefix}Project: {project_name}\n"
                f" {prefix}Labels: {list(flow.environment.labels)}"
            )
            print(msg)
        return flow_id
def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str:
"""
Convenience method for creating Prefect Cloud URLs for a given subdirectory.
Args:
- subdirectory (str): the subdirectory to use (e.g., `"flow-run"`)
- id (str): the ID of the page
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory
and ID
Example:
```python
from prefect import Client
client = Client()
client.get_cloud_url("flow-run", "424242-ca-94611-111-55")
# returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55"
```
"""
# Generate direct link to UI
if prefect.config.backend == "cloud":
tenant_slug = self.get_default_tenant_slug(as_user=as_user)
else:
tenant_slug = ""
base_url = (
re.sub("api-", "", prefect.config.cloud.api)
if re.search("api-", prefect.config.cloud.api)
else re.sub("api", "cloud", prefect.config.cloud.api)
)
full_url = prefect.config.cloud.api
if tenant_slug:
full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id])
elif prefect.config.backend == "server":
full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id])
return full_url
def get_default_tenant_slug(self, as_user: bool = True) -> str:
"""
Get the default tenant slug for the currently authenticated user
Args:
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the slug of the current default tenant for this user
"""
if as_user:
query = {
"query": {"user": {"default_membership": {"tenant": "slug"}}}
} # type: dict
else:
query = {"query": {"tenant": {"slug"}}}
res = self.graphql(query)
if as_user:
user = res.get("data").user[0]
slug = user.default_membership.tenant.slug
else:
slug = res.get("data").tenant[0].slug
return slug
def create_project(self, project_name: str, project_description: str = None) -> str:
"""
Create a new Project
Args:
- project_name (str): the project that should contain this flow
- project_description (str, optional): the project description
Returns:
- str: the ID of the newly-created project
Raises:
- ClientError: if the project creation failed
"""
project_mutation = {
"mutation($input: create_project_input!)": {
"create_project(input: $input)": {"id"}
}
}
res = self.graphql(
project_mutation,
variables=dict(
input=dict(
name=project_name,
description=project_description,
tenant_id=self._active_tenant_id,
)
),
) # type: Any
return res.data.create_project.id
def create_flow_run(
self,
flow_id: str = None,
context: dict = None,
parameters: dict = None,
scheduled_start_time: datetime.datetime = None,
idempotency_key: str = None,
run_name: str = None,
version_group_id: str = None,
) -> str:
"""
Create a new flow run for the given flow id. If `start_time` is not provided, the flow
run will be scheduled to start immediately. If both `flow_id` and `version_group_id`
are provided, only the `flow_id` will be used.
Args:
- flow_id (str, optional): the id of the Flow you wish to schedule
- context (dict, optional): the run context
- parameters (dict, optional): a dictionary of parameter values to pass to the flow run
- scheduled_start_time (datetime, optional): the time to schedule the execution
for; if not provided, defaults to now
- idempotency_key (str, optional): an idempotency key; if provided, this run will
be cached for 24 hours. Any subsequent attempts to create a run with the same
idempotency key will return the ID of the originally created run (no new run
will be created after the first). An error will be raised if parameters or
context are provided and don't match the original. Each subsequent request
will reset the TTL for 24 hours.
- run_name (str, optional): The name assigned to this flow run
- version_group_id (str, optional): if provided, the unique unarchived flow within
this version group will be scheduled to run. This input can be used as a
stable API for running flows which are regularly updated.
Returns:
- str: the ID of the newly-created flow run
Raises:
- ClientError: if the GraphQL query is bad for any reason
"""
create_mutation = {
"mutation($input: create_flow_run_input!)": {
"create_flow_run(input: $input)": {"id": True}
}
}
if not flow_id and not version_group_id:
raise ValueError("One of flow_id or version_group_id must be provided")
if flow_id:
inputs = dict(flow_id=flow_id)
else:
inputs = dict(version_group_id=version_group_id) # type: ignore
if parameters is not None:
inputs.update(parameters=parameters) # type: ignore
if context is not None:
inputs.update(context=context) # type: ignore
if idempotency_key is not None:
inputs.update(idempotency_key=idempotency_key) # type: ignore
if scheduled_start_time is not None:
inputs.update(
scheduled_start_time=scheduled_start_time.isoformat()
) # type: ignore
if run_name is not None:
inputs.update(flow_run_name=run_name) # type: ignore
res = self.graphql(create_mutation, variables=dict(input=inputs))
return res.data.create_flow_run.id # type: ignore
    def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult:
        """
        Retrieves version and current state information for the given flow run.

        Args:
            - flow_run_id (str): the id of the flow run to get information for

        Returns:
            - FlowRunInfoResult: the flow run's id, name, flow id, parameters, context,
                version, scheduled start time, deserialized state, and its (unmapped)
                task runs

        Raises:
            - ClientError: if the flow run is not found or the GraphQL query fails
        """
        query = {
            "query": {
                with_args("flow_run_by_pk", {"id": flow_run_id}): {
                    "id": True,
                    "name": True,
                    "flow_id": True,
                    "parameters": True,
                    "context": True,
                    "version": True,
                    "scheduled_start_time": True,
                    "serialized_state": True,
                    # load all task runs except dynamic task runs
                    with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): {
                        "id": True,
                        "task": {"id": True, "slug": True},
                        "version": True,
                        "serialized_state": True,
                    },
                }
            }
        }
        result = self.graphql(query).data.flow_run_by_pk  # type: ignore
        if result is None:
            raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id))
        # convert scheduled_start_time from string to datetime
        result.scheduled_start_time = pendulum.parse(result.scheduled_start_time)
        # create "state" attribute from serialized_state
        result.state = prefect.engine.state.State.deserialize(
            result.pop("serialized_state")
        )
        # reformat task_runs
        task_runs = []
        for tr in result.task_runs:
            # deserialize each task run's state and flatten its nested task info
            tr.state = prefect.engine.state.State.deserialize(
                tr.pop("serialized_state")
            )
            task_info = tr.pop("task")
            tr.task_id = task_info["id"]
            tr.task_slug = task_info["slug"]
            task_runs.append(TaskRunInfoResult(**tr))
        result.task_runs = task_runs
        # convert the GraphQL result wrappers back to plain dicts for callers
        result.context = (
            result.context.to_dict() if result.context is not None else None
        )
        result.parameters = (
            result.parameters.to_dict() if result.parameters is not None else None
        )
        return FlowRunInfoResult(**result)
    def update_flow_run_heartbeat(self, flow_run_id: str) -> None:
        """
        Convenience method for heartbeating a flow run.

        Does NOT raise an error if the update fails.

        Args:
            - flow_run_id (str): the flow run ID to heartbeat
        """
        mutation = {
            "mutation": {
                with_args(
                    "update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}}
                ): {"success"}
            }
        }
        # NOTE(review): `raise_on_error=True` means GraphQL errors WILL raise a
        # ClientError here, contradicting the "Does NOT raise" docstring above —
        # confirm which behavior is intended
        self.graphql(mutation, raise_on_error=True)
    def update_task_run_heartbeat(self, task_run_id: str) -> None:
        """
        Convenience method for heartbeating a task run.

        Does NOT raise an error if the update fails.

        Args:
            - task_run_id (str): the task run ID to heartbeat
        """
        mutation = {
            "mutation": {
                with_args(
                    "update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}}
                ): {"success"}
            }
        }
        # NOTE(review): `raise_on_error=True` means GraphQL errors WILL raise a
        # ClientError here, contradicting the "Does NOT raise" docstring above —
        # confirm which behavior is intended
        self.graphql(mutation, raise_on_error=True)
def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State":
"""
Retrieves the current state for a flow run.
Args:
- flow_run_id (str): the id for this flow run
Returns:
- State: a Prefect State object
"""
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"serialized_state": True,
}
}
}
flow_run = self.graphql(query).data.flow_run_by_pk
return prefect.engine.state.State.deserialize(flow_run.serialized_state)
def set_flow_run_state(
self,
flow_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a flow run in the database.
Args:
- flow_run_id (str): the id of the flow run to set state for
- state (State): the new state for this flow run
- version (int, optional): the current version of the flow run state. This is optional
but it can be supplied to enforce version-locking.
Returns:
- State: the state the current flow run should be considered in
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
mutation = {
"mutation($input: set_flow_run_states_input!)": {
"set_flow_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
flow_run_id=flow_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_flow_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
def get_latest_cached_states(
self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime
) -> List["prefect.engine.state.State"]:
"""
Pulls all Cached states for the given task that were created after the provided date.
Args:
- task_id (str): the task id for this task run
- cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the
task id alone will be used
- created_after (datetime.datetime): the earliest date the state should have been
created at
Returns:
- List[State]: a list of Cached states created after the given date
"""
args = {
"where": {
"state": {"_eq": "Cached"},
"state_timestamp": {"_gte": created_after.isoformat()},
},
"order_by": {"state_timestamp": EnumValue("desc")},
"limit": 100,
} # type: Dict[str, Any]
# if a cache key was provided, match it against all tasks
if cache_key is not None:
args["where"].update({"cache_key": {"_eq": cache_key}})
# otherwise match against only this task, across all cache keys
else:
args["where"].update({"task_id": {"_eq": task_id}})
query = {"query": {with_args("task_run", args): "serialized_state"}}
result = self.graphql(query) # type: Any
deserializer = prefect.engine.state.State.deserialize
valid_states = [
deserializer(res.serialized_state) for res in result.data.task_run
]
return valid_states
    def get_task_run_info(
        self, flow_run_id: str, task_id: str, map_index: Optional[int] = None
    ) -> TaskRunInfoResult:
        """
        Retrieves version and current state information for the given task run.

        The task run is first created (or fetched, if it already exists) via the
        `get_or_create_task_run` mutation, then queried for its version and state.

        Args:
            - flow_run_id (str): the id of the flow run that this task run lives in
            - task_id (str): the task id for this task run
            - map_index (int, optional): the mapping index for this task run; if
                `None`, it is assumed this task is _not_ mapped

        Returns:
            - TaskRunInfoResult: a tuple containing `id, task_id, task_slug, version, state`

        Raises:
            - ClientError: if the GraphQL mutation is bad for any reason
        """
        mutation = {
            "mutation": {
                with_args(
                    "get_or_create_task_run",
                    {
                        "input": {
                            "flow_run_id": flow_run_id,
                            "task_id": task_id,
                            # unmapped task runs are stored with map_index == -1
                            "map_index": -1 if map_index is None else map_index,
                        }
                    },
                ): {
                    "id": True,
                }
            }
        }
        result = self.graphql(mutation)  # type: Any
        if result is None:
            raise ClientError("Failed to create task run.")
        task_run_id = result.data.get_or_create_task_run.id
        # second call: fetch the (possibly just-created) run's version and state
        query = {
            "query": {
                with_args("task_run_by_pk", {"id": task_run_id}): {
                    "version": True,
                    "serialized_state": True,
                    "task": {"slug": True},
                }
            }
        }
        task_run = self.graphql(query).data.task_run_by_pk  # type: ignore
        if task_run is None:
            raise ClientError('Task run ID not found: "{}"'.format(task_run_id))
        state = prefect.engine.state.State.deserialize(task_run.serialized_state)
        return TaskRunInfoResult(
            id=task_run_id,
            task_id=task_id,
            task_slug=task_run.task.slug,
            version=task_run.version,
            state=state,
        )
def set_task_run_name(self, task_run_id: str, name: str) -> bool:
"""
Set the name of a task run
Args:
- task_run_id (str): the id of a task run
- name (str): a name for this task run
Returns:
- bool: whether or not the task run name was updated
"""
mutation = {
"mutation($input: set_task_run_name_input!)": {
"set_task_run_name(input: $input)": {
"success": True,
}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name))
)
return result.data.set_task_run_name.success
def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State":
"""
Retrieves the current state for a task run.
Args:
- task_run_id (str): the id for this task run
Returns:
- State: a Prefect State object
"""
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"serialized_state": True,
}
}
}
task_run = self.graphql(query).data.task_run_by_pk
return prefect.engine.state.State.deserialize(task_run.serialized_state)
def set_task_run_state(
self,
task_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
cache_for: datetime.timedelta = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a task run.
Args:
- task_run_id (str): the id of the task run to set state for
- state (State): the new state for this task run
- version (int, optional): the current version of the task run state. This is optional
but it can be supplied to enforce version-locking.
- cache_for (timedelta, optional): how long to store the result of this task for,
using the serializer set in config; if not provided, no caching occurs
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Returns:
- State: the state the current task run should be considered in
"""
mutation = {
"mutation($input: set_task_run_states_input!)": {
"set_task_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
task_run_id=task_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_task_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
def set_secret(self, name: str, value: Any) -> None:
"""
Set a secret with the given name and value.
Args:
- name (str): the name of the secret; used for retrieving the secret
during task runs
- value (Any): the value of the secret
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the secret-setting was unsuccessful
"""
mutation = {
"mutation($input: set_secret_input!)": {
"set_secret(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(name=name, value=value))
) # type: Any
if not result.data.set_secret.success:
raise ValueError("Setting secret failed.")
def get_task_tag_limit(self, tag: str) -> Optional[int]:
"""
Retrieve the current task tag concurrency limit for a given tag.
Args:
- tag (str): the tag to update
Raises:
- ClientError: if the GraphQL query fails
"""
query = {
"query": {
with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): {
"limit": True
}
}
}
result = self.graphql(query) # type: Any
if result.data.task_tag_limit:
return result.data.task_tag_limit[0].limit
else:
return None
def update_task_tag_limit(self, tag: str, limit: int) -> None:
"""
Update the task tag concurrency limit for a given tag; requires tenant admin permissions.
Args:
- tag (str): the tag to update
- limit (int): the concurrency limit to enforce on the tag; should be a value >= 0
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided
"""
if limit < 0:
raise ValueError("Concurrency limits must be >= 0")
mutation = {
"mutation($input: update_task_tag_limit_input!)": {
"update_task_tag_limit(input: $input)": {"id"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(tag=tag, limit=limit))
) # type: Any
if not result.data.update_task_tag_limit.id:
raise ValueError("Updating the task tag concurrency limit failed.")
def delete_task_tag_limit(self, limit_id: str) -> None:
"""
Deletes a given task tag concurrency limit; requires tenant admin permissions.
Args:
- limit_id (str): the ID of the tag to delete
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided
"""
mutation = {
"mutation($input: delete_task_tag_limit_input!)": {
"delete_task_tag_limit(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(limit_id=limit_id))
) # type: Any
if not result.data.delete_task_tag_limit.success:
raise ValueError("Deleting the task tag concurrency limit failed.")
def write_run_logs(self, logs: List[Dict]) -> None:
"""
Uploads a collection of logs to Cloud.
Args:
- logs (List[Dict]): a list of log entries to add
Raises:
- ValueError: if uploading the logs fail
"""
mutation = {
"mutation($input: write_run_logs_input!)": {
"write_run_logs(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(logs=logs))
) # type: Any
if not result.data.write_run_logs.success:
raise ValueError("Writing logs failed.")
def register_agent(
self,
agent_type: str,
name: str = None,
labels: List[str] = None,
agent_config_id: str = None,
) -> str:
"""
Register an agent with a backend API
Args:
- agent_type (str): The type of agent being registered
- name: (str, optional): The name of the agent being registered
- labels (List[str], optional): A list of any present labels on the agent
being registered
- agent_config_id (str, optional): The ID of an agent configuration to register with
Returns:
- The agent ID as a string
"""
mutation = {
"mutation($input: register_agent_input!)": {
"register_agent(input: $input)": {"id"}
}
}
result = self.graphql(
mutation,
variables=dict(
input=dict(
type=agent_type,
name=name,
labels=labels or [],
tenant_id=self._active_tenant_id,
agent_config_id=agent_config_id,
)
),
)
if not result.data.register_agent.id:
raise ValueError("Error registering agent")
return result.data.register_agent.id
def get_agent_config(self, agent_config_id: str) -> dict:
"""
Get agent config settings
Args:
- agent_config_id (str): The ID of an agent configuration to retrieve
Returns:
- dict: the agent configuration's `settings`
"""
query = {
"query": {
with_args(
"agent_config", {"where": {"id": {"_eq": agent_config_id}}}
): {"settings": True}
}
}
result = self.graphql(query) # type: Any
return result.data.agent_config[0].settings
| src/prefect/client/client.py | 55,892 | Client for communication with Prefect Cloud
If the arguments aren't specified the client initialization first checks the prefect
configuration and if the server is not set there it checks the current context. The
token will only be present in the current context.
Args:
- api_server (str, optional): the URL to send all GraphQL requests
to; if not provided, will be pulled from `cloud.graphql` config var
- api_token (str, optional): a Prefect Cloud API token, taken from
`config.cloud.auth_token` if not provided. If this token is USER-scoped, it may
be used to log in to any tenant that the user is a member of. In that case,
ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself
will be used as authorization.
Loads settings from local storage
Returns the local settings directory corresponding to the current API servers
Refresh the client's JWT access token.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- bool: True if the refresh succeeds
Runs any specified request (GET, POST, DELETE) against the server
Args:
- method (str): The type of request to be made (GET, POST, DELETE)
- path (str): Path of the API URL
- params (dict, optional): Parameters used for the request
- server (str, optional): The server to make requests against, base API
server is used if not specified
- headers (dict, optional): Headers to pass with the request
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- requests.models.Response: The response returned from the request
Raises:
- ClientError: if the client token is not in the context (due to not being logged in)
- ValueError: if a method is specified outside of the accepted GET, POST, DELETE
- requests.HTTPError: if a status code is returned that is not `200` or `401`
Writes settings to local storage
Set headers to be attached to this Client
Args:
- headers (dict): A dictionary of headers to attach to this client. These headers
get added on to the existing dictionary of headers.
Create a new flow run for the given flow id. If `start_time` is not provided, the flow
run will be scheduled to start immediately. If both `flow_id` and `version_group_id`
are provided, only the `flow_id` will be used.
Args:
- flow_id (str, optional): the id of the Flow you wish to schedule
- context (dict, optional): the run context
- parameters (dict, optional): a dictionary of parameter values to pass to the flow run
- scheduled_start_time (datetime, optional): the time to schedule the execution
for; if not provided, defaults to now
- idempotency_key (str, optional): an idempotency key; if provided, this run will
be cached for 24 hours. Any subsequent attempts to create a run with the same
idempotency key will return the ID of the originally created run (no new run
will be created after the first). An error will be raised if parameters or
context are provided and don't match the original. Each subsequent request
will reset the TTL for 24 hours.
- run_name (str, optional): The name assigned to this flow run
- version_group_id (str, optional): if provided, the unique unarchived flow within
this version group will be scheduled to run. This input can be used as a
stable API for running flows which are regularly updated.
Returns:
- str: the ID of the newly-created flow run
Raises:
- ClientError: if the GraphQL query is bad for any reason
Create a new Project
Args:
- project_name (str): the project that should contain this flow
- project_description (str, optional): the project description
Returns:
- str: the ID of the newly-created project
Raises:
- ClientError: if the project creation failed
Creates a new tenant.
Note this route only works when run against Prefect Server.
Args:
- name (str): the name of the tenant to create
- slug (str, optional): the slug of the tenant to create; defaults to name
Returns:
- str: the ID of the newly created tenant, or the ID of the currently active tenant
Raises:
- ValueError: if run against Prefect Cloud
Deletes a given task tag concurrency limit; requires tenant admin permissions.
Args:
- limit_id (str): the ID of the tag to delete
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided
Convenience function for calling the Prefect API with token auth and GET request
Args:
- path (str): the path of the API url. For example, to GET
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the GET request to;
defaults to `self.api_server`
- headers (dict, optional): Headers to pass with the request
- params (dict): GET parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
Get agent config settings
Args:
- agent_config_id (str): The ID of an agent configuration to retrieve
Returns:
- dict: the agent configuration's `settings`
Returns an auth token:
- if no explicit access token is stored, returns the api token
- if there is an access token:
- if there's a refresh token and the access token expires in the next 30 seconds,
then we refresh the access token and store the result
- return the access token
Returns:
- str: the access token
Returns a list of available tenants.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- List[Dict]: a list of dictionaries containing the id, slug, and name of
available tenants
Convenience method for creating Prefect Cloud URLs for a given subdirectory.
Args:
- subdirectory (str): the subdirectory to use (e.g., `"flow-run"`)
- id (str): the ID of the page
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory
and ID
Example:
```python
from prefect import Client
client = Client()
client.get_cloud_url("flow-run", "424242-ca-94611-111-55")
# returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55"
```
Get the default tenant slug for the currently authenticated user
Args:
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the slug of the current default tenant for this user
Retrieves version and current state information for the given flow run.
Args:
- flow_run_id (str): the id of the flow run to get information for
Returns:
- GraphQLResult: an object representing information about the flow run
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Retrieves the current state for a flow run.
Args:
- flow_run_id (str): the id for this flow run
Returns:
- State: a Prefect State object
Pulls all Cached states for the given task that were created after the provided date.
Args:
- task_id (str): the task id for this task run
- cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the
task id alone will be used
- created_after (datetime.datetime): the earliest date the state should have been
created at
Returns:
- List[State]: a list of Cached states created after the given date
Retrieves version and current state information for the given task run.
Args:
- flow_run_id (str): the id of the flow run that this task run lives in
- task_id (str): the task id for this task run
- map_index (int, optional): the mapping index for this task run; if
`None`, it is assumed this task is _not_ mapped
Returns:
- NamedTuple: a tuple containing `id, task_id, version, state`
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Retrieves the current state for a task run.
Args:
- task_run_id (str): the id for this task run
Returns:
- State: a Prefect State object
Retrieve the current task tag concurrency limit for a given tag.
Args:
- tag (str): the tag to update
Raises:
- ClientError: if the GraphQL query fails
Convenience function for running queries against the Prefect GraphQL API
Args:
- query (Any): A representation of a graphql query to be executed. It will be
parsed by prefect.utilities.graphql.parse_graphql().
- raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL
returns any `errors`.
- headers (dict): any additional headers that should be passed as part of the
request
- variables (dict): Variables to be filled into a query with the key being
equivalent to the variables that are accepted by the query
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Data returned from the GraphQL query
Raises:
- ClientError if there are errors raised by the GraphQL mutation
Log in to a specific tenant
NOTE: this should only be called by users who have provided a USER-scoped API token.
Args:
- tenant_slug (str): the tenant's slug
- tenant_id (str): the tenant's id
Returns:
- bool: True if the login was successful
Raises:
- ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided
- ValueError: if the `tenant_id` is not a valid UUID
- ValueError: if no matching tenants are found
Convenience function for calling the Prefect API with token auth and POST request
Args:
- path (str): the path of the API url. For example, to POST
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the POST request to;
defaults to `self.api_server`
- headers(dict): headers to pass with the request
- params (dict): POST parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
Push a new flow to Prefect Cloud
Args:
- flow (Flow): a flow to register
- project_name (str, optional): the project that should contain this flow.
- build (bool, optional): if `True`, the flow's environment is built
prior to serialization; defaults to `True`
- set_schedule_active (bool, optional): if `False`, will set the schedule to
inactive in the database to prevent auto-scheduling runs (if the Flow has a
schedule). Defaults to `True`. This can be changed later.
- version_group_id (str, optional): the UUID version group ID to use for versioning
this Flow in Cloud; if not provided, the version group ID associated with this
Flow's project and name will be used.
- compressed (bool, optional): if `True`, the serialized flow will be
compressed; defaults to `True`
- no_url (bool, optional): if `True`, the stdout from this function will not
contain the URL link to the newly-registered flow in the Cloud UI
Returns:
- str: the ID of the newly-registered flow
Raises:
- ClientError: if the register failed
Register an agent with a backend API
Args:
- agent_type (str): The type of agent being registered
- name (str, optional): The name of the agent being registered
- labels (List[str], optional): A list of any present labels on the agent
being registered
- agent_config_id (str, optional): The ID of an agent configuration to register with
Returns:
- The agent ID as a string
Saves the API token in local storage.
Sets new state for a flow run in the database.
Args:
- flow_run_id (str): the id of the flow run to set state for
- state (State): the new state for this flow run
- version (int, optional): the current version of the flow run state. This is optional
but it can be supplied to enforce version-locking.
Returns:
- State: the state the current flow run should be considered in
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Set a secret with the given name and value.
Args:
- name (str): the name of the secret; used for retrieving the secret
during task runs
- value (Any): the value of the secret
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the secret-setting was unsuccessful
Set the name of a task run
Args:
- task_run_id (str): the id of a task run
- name (str): a name for this task run
Returns:
- bool: whether or not the task run name was updated
Sets new state for a task run.
Args:
- task_run_id (str): the id of the task run to set state for
- state (State): the new state for this task run
- version (int, optional): the current version of the task run state. This is optional
but it can be supplied to enforce version-locking.
- cache_for (timedelta, optional): how long to store the result of this task for,
using the serializer set in config; if not provided, no caching occurs
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Returns:
- State: the state the current task run should be considered in
Convenience method for heartbeating a flow run.
Does NOT raise an error if the update fails.
Args:
- flow_run_id (str): the flow run ID to heartbeat
Convenience method for heartbeating a task run.
Does NOT raise an error if the update fails.
Args:
- task_run_id (str): the task run ID to heartbeat
Update the task tag concurrency limit for a given tag; requires tenant admin permissions.
Args:
- tag (str): the tag to update
- limit (int): the concurrency limit to enforce on the tag; should be a value >= 0
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided
Uploads a collection of logs to Cloud.
Args:
- logs (List[Dict]): a list of log entries to add
Raises:
- ValueError: if uploading the logs fails
if simplejson is installed, `requests` defaults to using it instead of json this allows the client to gracefully handle either json or simplejson type definitions for GraphQL results type: Dict[str, str] store api server store api token if no api token was passed, attempt to load state from local storage if an authorization error is raised, then the token is invalid and should be cleared TODO: Separate put this functionality and clean up initial tenant access handling ------------------------------------------------------------------------- Utilities type: ignore type: ignore Check if request returned a successful status mypy assert 'import requests' is expensive time-wise, we should do this just-in-time to keep the 'import prefect' time low parse the response check if there was an API_ERROR code in the response retry up to six times ------------------------------------------------------------------------- Auth ------------------------------------------------------------------------- type: ignore use the API token to see all available tenants type: ignore type: ignore use the API token to query the tenant type: ignore type: ignore type: ignore Use the API token to switch tenants type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore save the tenant setting remove the tenant setting pass the refresh token as the auth header type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore ------------------------------------------------------------------------- Actions ------------------------------------------------------------------------- type: ignore type: Any Set Docker storage image in environment metadata if provided If no image ever set, default metadata to all_extras image on current version verify that the serialized flow can be deserialized type: Any Generate direct link to Cloud flow Extra information to improve visibility Generate direct link to UI type: dict type: Any type: ignore type: ignore type: ignore type: 
ignore type: ignore type: ignore type: ignore load all task runs except dynamic task runs type: ignore convert scheduled_start_time from string to datetime create "state" attribute from serialized_state reformat task_runs type: Any If appropriate, the state attribute of the Queued state can be set by the caller of this method type: Dict[str, Any] if a cache key was provided, match it against all tasks otherwise match against only this task, across all cache keys type: Any type: Any type: ignore type: Any If appropriate, the state attribute of the Queued state can be set by the caller of this method type: Any type: Any type: Any type: Any type: Any type: Any | 17,601 | en | 0.717949 |
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re


def is_valid_regex(pattern):
    """Return True when *pattern* compiles as a regular expression, else False."""
    try:
        re.compile(pattern)
        return True
    except re.error:
        return False


if __name__ == "__main__":
    # First line of input: number of patterns; each following line: one pattern.
    # Printing the bool gives exactly "True"/"False", matching the original output.
    for _ in range(int(input())):
        print(is_valid_regex(input()))
| Hackerrank_python/9.erros and exceptions/66.Incorrect Regex.py | 222 | Enter your code here. Read input from STDIN. Print output to STOUT | 66 | en | 0.812699 |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .agent import agent
from .ci import ci
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .docs import docs
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
ALL_COMMANDS = (agent, ci, clean, config, create, dep, docs, env, meta, release, run, test, validate)
| datadog_checks_dev/datadog_checks/dev/tooling/commands/__init__.py | 533 | (C) Datadog, Inc. 2018-present All rights reserved Licensed under a 3-clause BSD style license (see LICENSE) | 108 | en | 0.821201 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/framework/formats/annotation/rasterization.proto
# NOTE(review): machine-generated module -- hand-written comments here will be
# lost on the next protoc run; regenerate from the .proto instead of editing.
import sys
# Py2/Py3 shim: on Python 3 encode str literals to latin-1 bytes, identity on Python 2.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor for rasterization.proto (package "mediapipe", proto2 syntax).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mediapipe/framework/formats/annotation/rasterization.proto',
  package='mediapipe',
  syntax='proto2',
  serialized_options=_b('\n-com.google.mediapipe.formats.annotation.protoB\022RasterizationProto'),
  serialized_pb=_b('\n:mediapipe/framework/formats/annotation/rasterization.proto\x12\tmediapipe\"|\n\rRasterization\x12\x33\n\x08interval\x18\x01 \x03(\x0b\x32!.mediapipe.Rasterization.Interval\x1a\x36\n\x08Interval\x12\t\n\x01y\x18\x01 \x02(\x05\x12\x0e\n\x06left_x\x18\x02 \x02(\x05\x12\x0f\n\x07right_x\x18\x03 \x02(\x05\x42\x43\n-com.google.mediapipe.formats.annotation.protoB\x12RasterizationProto')
)
# Descriptor for the nested message Rasterization.Interval: three required
# int32 fields (y, left_x, right_x) -- type=5 is int32, label=2 is "required".
_RASTERIZATION_INTERVAL = _descriptor.Descriptor(
  name='Interval',
  full_name='mediapipe.Rasterization.Interval',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='y', full_name='mediapipe.Rasterization.Interval.y', index=0,
      number=1, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='left_x', full_name='mediapipe.Rasterization.Interval.left_x', index=1,
      number=2, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='right_x', full_name='mediapipe.Rasterization.Interval.right_x', index=2,
      number=3, type=5, cpp_type=1, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=143,
  serialized_end=197,
)
# Descriptor for the top-level Rasterization message: a repeated field
# "interval" of Rasterization.Interval (label=3 is "repeated", type=11 message).
_RASTERIZATION = _descriptor.Descriptor(
  name='Rasterization',
  full_name='mediapipe.Rasterization',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='interval', full_name='mediapipe.Rasterization.interval', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_RASTERIZATION_INTERVAL, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=73,
  serialized_end=197,
)
# Wire up cross-references, then register the file descriptor before creating
# the message classes (order matters for the symbol database).
_RASTERIZATION_INTERVAL.containing_type = _RASTERIZATION
_RASTERIZATION.fields_by_name['interval'].message_type = _RASTERIZATION_INTERVAL
DESCRIPTOR.message_types_by_name['Rasterization'] = _RASTERIZATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes built from the descriptors via the reflection API;
# Interval is exposed as a nested class of Rasterization.
Rasterization = _reflection.GeneratedProtocolMessageType('Rasterization', (_message.Message,), dict(
  Interval = _reflection.GeneratedProtocolMessageType('Interval', (_message.Message,), dict(
    DESCRIPTOR = _RASTERIZATION_INTERVAL,
    __module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
    # @@protoc_insertion_point(class_scope:mediapipe.Rasterization.Interval)
    ))
  ,
  DESCRIPTOR = _RASTERIZATION,
  __module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
  # @@protoc_insertion_point(class_scope:mediapipe.Rasterization)
  ))
_sym_db.RegisterMessage(Rasterization)
_sym_db.RegisterMessage(Rasterization.Interval)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| GestureVolume/venv/lib/python3.8/site-packages/mediapipe/framework/formats/annotation/rasterization_pb2.py | 4,479 | -*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: mediapipe/framework/formats/annotation/rasterization.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:mediapipe.Rasterization.Interval) @@protoc_insertion_point(class_scope:mediapipe.Rasterization) @@protoc_insertion_point(module_scope) | 351 | en | 0.468195 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Luis Rodriguez <luis.rodriguez@opendeusto.es>
#
import threading
import time
import json
class Watertank(object):
    """
    Watertank Model

    Simulates the water volume of a tank fed by two input pumps and drained by
    a set of outputs, plus a simple per-pump temperature model: a pump heats up
    while it is on and cools back down towards 20 while it is off.

    Output example:
    {"water": 0.0, "inputs": [0.5, 0.5], "temperatures": [716, 20], "outputs": [1.0]}

    Changes that have been applied lately to this model (Dec 2015)
    - There is no longer a separate temperatures mode. Now there is a single model with temperatures.
    - There are no longer temperature working ranges, temperature warnings, or temperature overloads. The
    model will not prevent the pumps from working. Instead, the temperature will increase indefinitely. The experiment
    client can thus deal with temperatures however it wishes (and it can in fact ignore them), with no effect.
    - As a result of the previous change, temperature is no longer reported as in the [0,1] range according to the range.
    Now it is reported in raw form.
    """

    def __init__(self, tank_capacity, inputs, outputs, water_level):
        self.initialize(tank_capacity, inputs, outputs, water_level)

    def initialize(self, tank_capacity, inputs, outputs, water_level):
        """
        Initializes the simulation with the specified data.
        @param tank_capacity Capacity of the water tank, in liters.
        @param inputs Array containing the flow volume of the inputs (such as water pumps),
        in liters per second. The flow can be modified dynamically, but no inputs can be added.
        @param outputs Array containing the outputs (such as a water hose or evaporation),
        in liters per second. The flow can be modified dynamically, but no outputs can be added.
        @param water_level The starting water level. Value from 0 to 1.
        """
        self.tank_capacity = tank_capacity
        self.inputs = inputs
        self.outputs = outputs
        self.current_volume = water_level * tank_capacity
        # Pumps start at ambient temperature (20). The work-range attributes are
        # kept for backwards compatibility but are not enforced anywhere in this
        # model (see the class docstring).
        self.firstPumpTemperature = 20
        self.secondPumpTemperature = 20
        self.firstPumpWorkRange = [20, 200]
        self.secondPumpWorkRange = [20, 200]
        self.pumpTemperatureVariationPerSeconds = 6  # Enough for 30 seconds?
        self.simlock = threading.RLock()
        self._thread = None
        self._autoupdating = False
        # NOTE(review): autoupdater_start documents its interval in seconds, but
        # this default looks like milliseconds -- confirm the intended unit.
        self._autoupdating_interval = 1000

    def update(self, delta):
        """
        Updates the simulation. Can be done automatically if the autoupdater is used.
        @param delta Delta in seconds.
        @see autoupdater_start
        """
        # Water drained by every output during this step.
        total_output = 0
        for out in self.outputs:
            total_output += out * delta

        # Calculates how much the pumps are putting in.
        total_input = 0

        # Handle inputs
        pump1, pump2 = self.inputs

        # If the first pump is turned on we increase the temperature and the total water input
        if pump1 > 0:
            # We multiply by 1.1 so that its temperature raises faster.
            self.firstPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds * 1.1
            total_input += pump1 * delta
        else:
            # Pump off: cool down towards ambient, clamped at 20. For the
            # expected flow of 0 the input adjustment below is a no-op; it only
            # matters if a negative flow were ever configured.
            self.firstPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
            self.firstPumpTemperature = max(20, self.firstPumpTemperature)
            total_input -= pump1 * delta

        # If the second pump is turned on we increase the temperature and the total water input
        if pump2 > 0:
            self.secondPumpTemperature += delta * self.pumpTemperatureVariationPerSeconds
            total_input += pump2 * delta
        else:
            self.secondPumpTemperature -= delta * self.pumpTemperatureVariationPerSeconds
            self.secondPumpTemperature = max(20, self.secondPumpTemperature)
            total_input -= pump2 * delta

        increment = total_input - total_output

        with self.simlock:
            self.current_volume += increment

            # Ensure the volume stays realistic
            if self.current_volume >= self.tank_capacity:
                self.current_volume = self.tank_capacity
            elif self.current_volume < 0:
                self.current_volume = 0.0

    def t_updater(self):
        """
        This internal method is used by the autoupdating thread to update
        the simulation every few seconds (specified as the autoupdater interval).
        """
        while self._autoupdating:
            time.sleep(self._autoupdating_interval)
            self.update(self._autoupdating_interval)

    def autoupdater_start(self, interval):
        """
        Starts the autoupdating thread. That is, a thread that will call update
        every so often. If started, it should eventually be stopped. Otherwise,
        it will run forever in the background.
        @param interval Interval between updates, in seconds.
        @see autoupdater_stop
        """
        self._autoupdating = True
        self._autoupdating_interval = interval
        self._thread = threading.Thread(None, self.t_updater)
        self._thread.start()

    def autoupdater_stop(self):
        """
        Stops the autoupdating thread. This method is non-blocking. It will signal
        the thread to stop, but may take a while before it *really* does stop.
        There is a blocking version of this method.
        @see autoupdater_join
        """
        self._autoupdating = False

    def autoupdater_join(self):
        """
        Stops the autoupdating thread, and joins that thread until it really does stop.
        May block forever if for some reason the thread won't stop, but that
        should not happen.
        """
        self._autoupdating = False
        # BUGFIX: this used to call join(0), which returns immediately and never
        # actually waits for the updater thread, contradicting the documented
        # blocking behaviour above. Join without a timeout instead; the thread
        # exits after at most one sleep interval once the flag is cleared.
        self._thread.join()

    def set_input(self, input_number, input_flow):
        """
        Sets the value for an input in the simulation.
        @param input_number Number identifying the input. The input should exist.
        @param input_flow New flow of the input, in liters per second.
        """
        with self.simlock:
            self.inputs[input_number] = input_flow

    def set_output(self, output_number, output_flow):
        """
        Sets the value for an output in the simulation.
        @param output_number Number identifying the output. The output should exist.
        @param output_flow New flow of the output, in liters per second.
        """
        with self.simlock:
            self.outputs[output_number] = output_flow

    def set_inputs(self, inputs):
        """
        Redefines the whole array of inputs.
        @param inputs Array containing the flow of every input.
        """
        with self.simlock:
            self.inputs = inputs

    def set_outputs(self, outputs):
        """
        Redefines the whole array of outputs.
        @param outputs Array containing the flow of every output.
        """
        with self.simlock:
            self.outputs = outputs

    def get_temperatures(self):
        """
        Gets the raw temperatures of the two pumps.
        @return List [first pump temperature, second pump temperature]
        """
        return [self.firstPumpTemperature, self.secondPumpTemperature]

    def get_water_volume(self):
        """
        Gets the current water volume in liters. It will vary dynamically according to the
        simulation's state.
        """
        with self.simlock:
            return self.current_volume

    def get_water_level(self):
        """
        Gets the current water level, as a number from 0 to 1 (empty to full). It will vary dynamically
        according to the simulation's state.
        """
        with self.simlock:
            return 1.0 * self.current_volume / self.tank_capacity

    def get_json_state(self, input_capacities, output_capacities):
        """
        Gets a json-encoded description of the simulation's state.
        As of now, it takes output and input capacities as arguments because the JSON state
        is described through relative values. (For instance, first output at 0.3 capacity).
        @param input_capacities An array containing the maximum capacities of the input.
        @param output_capacities An array containing the maximum capacities of the output.
        @return JSON string, or the literal "{}" if input_capacities does not
        match the number of inputs.
        """
        # NOTE(review): only the inputs length is validated; a mismatched
        # output_capacities list would raise from zip truncation silently --
        # confirm whether outputs should be validated the same way.
        if len(self.inputs) != len(input_capacities):
            return "{}"

        inputs = []
        for inp, cap in zip(self.inputs, input_capacities):
            inputs.append(1.0 * inp / cap)

        outputs = []
        for inp, cap in zip(self.outputs, output_capacities):
            outputs.append(1.0 * inp / cap)

        state = {"water": self.get_water_level(), "inputs": inputs, "outputs": outputs}

        # Report the RAW temperature
        temperatures = [0, 0]
        temperatures[0] = self.firstPumpTemperature
        temperatures[1] = self.secondPumpTemperature
        state["temperatures"] = temperatures

        return json.dumps(state)
if __name__ == '__main__':
    # Ad-hoc self-test harness: patches time.sleep with a cheap busy-wait so the
    # autoupdater-driven tests finish quickly instead of sleeping for real.
    # NOTE(review): `mock` is the third-party backport package; on Python 3 the
    # same `patch` is available as `unittest.mock.patch` -- confirm the target
    # interpreter before changing this import.
    from mock import patch
    import unittest
    def fake_sleep(t):
        # TODO
        # Stand-in for time.sleep: burns a little CPU instead of blocking; the
        # requested duration `t` is ignored entirely.
        a = [1 for i in range(100000)] # very fast kludge to add minor delay
        b = len(a)
        pass
    class TestWatertankSimulation(unittest.TestCase):
        def test_nothing(self):
            pass
        def _get_state(self, w):
            # Helper: decode the tank's JSON state into a plain dict.
            js = w.get_json_state([20, 20], [100])
            d = json.loads(js)
            return d
        @patch("time.sleep", fake_sleep)
        def test_waterlevel_increase_decrease(self):
            # Inputs (200 l/s total) exceed the output (100 l/s), so the level
            # should rise; after raising the output to 400 l/s it should fall.
            w = Watertank(1000, [100, 100], [100], 0.5)
            w.autoupdater_start(1)
            initial_level = self._get_state(w)["water"]
            i = 0
            while (i < 15):
                time.sleep(0.5)
                i += 1
            other_level = self._get_state(w)["water"]
            # Check that the water level did increase
            self.assertGreater(other_level, initial_level)
            w.set_outputs([400])
            i = 0
            while (i < 15):
                time.sleep(0.5)
                i += 1
            dec_level = self._get_state(w)["water"]
            # Check that the water level did decrease
            self.assertGreater(other_level, dec_level)
        @patch("time.sleep", fake_sleep)
        def test_temperature_increase_decrease(self):
            # While a pump is on its temperature climbs; once switched off it
            # should cool back down towards ambient.
            w = Watertank(1000, [100, 100], [100], 0.5)
            w.autoupdater_start(1)
            t0 = self._get_state(w)["temperatures"][0]
            i = 0
            while (i < 15):
                time.sleep(0.5)
                i += 1
            t1 = self._get_state(w)["temperatures"][0]
            # Check that the first pump's temperature did increase
            self.assertGreater(t1, t0)
            w.set_inputs([0, 0])
            i = 0
            while (i < 15):
                time.sleep(0.5)
                i += 1
            t2 = self._get_state(w)["temperatures"][0]
            # Check that the first pump's temperature did decrease
            self.assertGreater(t1, t2)
        # @patch("time.sleep", fake_sleep)
        # def test_first(self):
        # w = Watertank(1000, [100, 100], [100], 0.5)
        # w.autoupdater_start(1)
        #
        # i = 0
        # while (i < 15):
        # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
        # time.sleep(0.5)
        # i += 1
        #
        # print "...."
        # i = 0
        # w.set_outputs([100])
        # w.set_inputs([10, 10])
        # while (i < 30):
        # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
        # time.sleep(0.5)
        # i += 1
        #
        # w.autoupdater_join()
        #
        # @patch("time.sleep", fake_sleep)
        # def test_second(self):
        # w = Watertank(1000, [100, 100], [100], 0.5)
        #
        # i = 0
        # while i < 15:
        # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
        # w.update(1)
        # i += 1
        #
        # print "...."
        # i = 0
        # w.set_outputs([100])
        # w.set_inputs([10, 10])
        # while i < 15:
        # print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100])
        # w.update(1)
        # i += 1
    unittest.main()
Output example:
{"water": 0.0, "inputs": [0.5, 0.5], "temperatures": [716, 20], "outputs": [1.0]}
Changes that have been applied lately to this model (Dec 2015)
- There is no longer a separate temperatures mode. Now there is a single model with temperatures.
- There are no longer temperature working ranges, temperature warnings, or temperature overloads. The
model will not prevent the pumps from working. Instead, the temperature will increase indefinitely. The experiment
client can thus deal with temperatures however it wishes (and it can in fact ignore them), with no effect.
- As a result of the previous change, the temperature is no longer reported as a value normalized
to the [0,1] working range. It is now reported in raw form.
Stops the autoupdating thread, and joins that thread until it really does stop.
May block forever if for some reason the thread won't stop, but that
should not happen.
Starts the autoupdating thread. That is, a thread that will call update
every so often. If started, it should eventually be stopped. Otherwise,
it will run forever in the background.
@param interval Interval between updates, in seconds.
@see autoupdater_stop
Stops the autoupdating thread. This method is non-blocking. It will signal
the thread to stop, but may take a while before it *really* does stop.
There is a blocking version of this method.
@see autoupdater_join
Gets a json-encoded description of the simulation's state.
As of now, it takes output and input capacities as arguments because the JSON state
is described through relative values. (For instance, first output at 0.3 capacity).
@param input_capacities An array containing the maximum capacities of the input.
@param output_capacities An array containing the maximum capacities of the output.
Get temperatures.
:return:
Gets the current water level, as a number from 0 to 1 (empty to full). It will vary dynamically
according to the simulation's state.
Gets the current water volume in liters. It will vary dynamically according to the
simulation's state.
Initializes the simulation with the specified data.
@param tank_capacity Capacity of the water tank, in liters.
@param Array containing the flow volume of the inputs (such as water pumps), in liters per second.
The flow can be modified dynamically, but no inputs can be added.
@param outputs Array containing the outputs (such as a water hose or evaporation), in liters per second.
The flow can be modified dynamically, but no outputs can be added.
@param water_level The starting water level. Value from 0 to 1.
Sets the value for an input in the simulation.
@param input_number Number identifying the input. The input should exist.
@param input_flow New flow of the input, in liters per second.
Redefines the whole array of inputs.
@param inputs Array containing the flow of every input.
Sets the value for an output in the simulation.
@param output_number Number identifying the output. The output should exist.
@param output_flow New flow of the output, in liters per second.
Redefines the whole array of outputs.
@param outputs Array containing the flow of every output.
This internal method is used by the autoupdating thread to update
the simulation every few seconds (specified as the autoupdater interval).
Updates the simulation. Can be done automatically if the autoupdater is used.
@param delta Delta in seconds.
@see autoupdater_start
!/usr/bin/python -*- coding: utf-8 -*- Copyright (C) 2005 onwards University of Deusto All rights reserved. This software is licensed as described in the file COPYING, which you should have received as part of this distribution. This software consists of contributions made by many individuals, listed below: Author: Luis Rodriguez <luis.rodriguez@opendeusto.es> Enough for 30 seconds? Calculates how much the pumps are putting in. Handle inputs If the first pump is turned on we increase the temperature and the total water input We multiply by 1.1 so that its temperature raises faster. If the second pump is turned on we increase the temperature and the total water input Ensure the volume stays realistic Report the RAW temperature TODO very fast kludge to add minor delay Check that the water level did increase Check that the water level did decrease Check that the water level did increase Check that the water level did decrease @patch("time.sleep", fake_sleep) def test_first(self): w = Watertank(1000, [100, 100], [100], 0.5) w.autoupdater_start(1) i = 0 while (i < 15): print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) time.sleep(0.5) i += 1 print "...." i = 0 w.set_outputs([100]) w.set_inputs([10, 10]) while (i < 30): print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) time.sleep(0.5) i += 1 w.autoupdater_join() @patch("time.sleep", fake_sleep) def test_second(self): w = Watertank(1000, [100, 100], [100], 0.5) i = 0 while i < 15: print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) w.update(1) i += 1 print "...." i = 0 w.set_outputs([100]) w.set_inputs([10, 10]) while i < 15: print w.tank_capacity, w.get_water_level(), w.get_water_volume(), w.get_json_state([20, 20], [100]) w.update(1) i += 1 | 5,438 | en | 0.799177 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Instantiate the shared Dantari base-male NPC creature template."""
    creature = Creature()
    creature.template = "object/creature/npc/base/shared_dantari_base_male.iff"
    creature.attribute_template_id = 9
    creature.stfName("npc_name","dantari_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
import datetime
import itertools
import logging
import os
import platform
import time
from collections import defaultdict
from operator import itemgetter
from typing import (
AbstractSet,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import ujson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import override as override_language
from django.utils.translation import ugettext as _
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat
from analytics.models import StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import get_emoji_file_name
from zerver.lib.exceptions import (
ErrorCode,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MentionData, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import (
MessageDict,
access_message,
render_markdown,
truncate_body,
truncate_topic,
update_first_visible_message_id,
)
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_for_send_message,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from zerver.models import (
MAX_MESSAGE_LENGTH,
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
EmailChangeStatus,
Message,
MultiuseInvite,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
ScheduledEmail,
ScheduledMessage,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_stream,
get_stream_by_id_in_realm,
get_stream_cache_key,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
query_for_ids,
realm_filters_for_realm,
stream_name_in_use,
validate_attachment_request,
)
from zerver.tornado.event_queue import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed
# This will be used to type annotate parameters in a function if the function
# works on both str and unicode in python 2 but in python 3 it only works on str.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
# How many recent public-stream messages are copied into a brand-new user's
# history, and how many of the newest of those stay marked unread
# (see add_new_user_history below).
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
# Color palette for stream subscriptions; presumably cycled through when
# auto-assigning a color to a new subscription — confirm at call sites.
STREAM_ASSIGNMENT_COLORS = [
    "#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
    "#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
    "#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
    "#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
    "#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
    "#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
def subscriber_info(user_id: int) -> Dict[str, Any]:
    """Build the minimal per-subscriber payload: the user's id plus a
    flags list that always marks the message as read."""
    return dict(id=user_id, flags=['read'])
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
    """Append `event` as one JSON line to today's event log file.

    No-op when settings.EVENT_LOG_DIR is None. A timestamp is added if
    the caller did not supply one. The write is serialized through a
    lock file so concurrent processes do not interleave lines.
    """
    if settings.EVENT_LOG_DIR is None:
        return
    if "timestamp" not in event:
        event["timestamp"] = time.time()
    # makedirs(..., exist_ok=True) avoids the check-then-create race that
    # the previous os.path.exists()/os.mkdir() pair had when several
    # processes logged their first event at the same time.
    os.makedirs(settings.EVENT_LOG_DIR, exist_ok=True)
    template = os.path.join(settings.EVENT_LOG_DIR,
                            '%s.' + platform.node() +
                            timezone_now().strftime('.%Y-%m-%d'))
    with lockfile(template % ('lock',)):
        with open(template % ('events',), 'a') as log:
            log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
    """Return the ids of users who can access the attributes of a stream,
    such as its name/description."""
    if stream.is_public():
        # Public stream: everyone in the realm except unsubscribed guests.
        return public_stream_user_ids(stream)
    # Private stream: its subscribers plus all realm admins and admin bots.
    admin_ids = {user.id for user in stream.realm.get_admin_users_and_bots()}
    return private_stream_user_ids(stream.id) | admin_ids
def private_stream_user_ids(stream_id: int) -> Set[int]:
    """Return the ids of users actively subscribed to the given stream."""
    # TODO: Find similar queries elsewhere and de-duplicate this code.
    rows = get_active_subscriptions_for_stream_id(stream_id).values('user_profile_id')
    return {row['user_profile_id'] for row in rows}
def public_stream_user_ids(stream: Stream) -> Set[int]:
    """All non-guest users in the stream's realm, plus any guests who are
    actively subscribed to the stream."""
    guest_sub_rows = get_active_subscriptions_for_stream_id(
        stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST)
    subscribed_guest_ids = {
        row['user_profile_id'] for row in guest_sub_rows.values('user_profile_id')
    }
    return subscribed_guest_ids | set(active_non_guest_user_ids(stream.realm_id))
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
    """Return the ids of users who should receive this bot's events: for a
    "private" bot (one whose default sending or events-register stream is
    invite-only) only its owner; otherwise its owner plus every human
    realm administrator."""
    # Note the operator precedence here: this evaluates as
    # (default_sending_stream and its invite_only) or
    # (default_events_register_stream and its invite_only),
    # so a None stream on either side short-circuits safely.
    is_private_bot = (
        user_profile.default_sending_stream and
        user_profile.default_sending_stream.invite_only or
        user_profile.default_events_register_stream and
        user_profile.default_events_register_stream.invite_only)
    if is_private_bot:
        return {user_profile.bot_owner_id}
    else:
        users = {user.id for user in user_profile.realm.get_human_admin_users()}
        # NOTE(review): bot_owner_id may be None for ownerless default
        # bots; that None would end up in the returned set — confirm
        # callers tolerate it.
        users.add(user_profile.bot_owner_id)
        return users
def realm_user_count(realm: Realm) -> int:
    """Count the active human (non-bot) users in the realm."""
    active_humans = UserProfile.objects.filter(
        realm=realm, is_active=True, is_bot=False)
    return active_humans.count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
    """Break down the realm's active users for audit logging: a mapping of
    role -> active human count, plus the total number of active bots.

    Roles with no active users keep their explicit 0 entry so the audit
    log always records every role.
    """
    human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
                    UserProfile.ROLE_REALM_OWNER: 0,
                    UserProfile.ROLE_MEMBER: 0,
                    UserProfile.ROLE_GUEST: 0}
    # Iterate the queryset directly; the previous list(...) wrapper
    # materialized a throwaway list for no benefit.
    for value_dict in UserProfile.objects.filter(
            realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role')):
        human_counts[value_dict['role']] = value_dict['role__count']
    bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
    return {
        RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
        RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
    }
def get_signups_stream(realm: Realm) -> Stream:
    """Fetch the realm's "signups" stream; raises Stream.DoesNotExist if
    the stream has not been created (callers catch this)."""
    # This one-liner helps us work around a lint rule.
    stream_name = "signups"
    return get_stream(stream_name, realm)
def notify_new_user(user_profile: UserProfile) -> None:
    """Announce a signup: post to the realm's signup-notifications stream
    (skipped for the realm's very first user), and mirror the announcement
    into the "signups" stream of the Zulip administrative realm when that
    stream exists."""
    sender_email = settings.NOTIFICATION_BOT
    sender = get_system_bot(sender_email)
    user_count = realm_user_count(user_profile.realm)
    signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
    # Send notification to realm signup notifications stream if it exists
    # Don't send notification for the first user in a realm
    if signup_notifications_stream is not None and user_count > 1:
        with override_language(user_profile.realm.default_language):
            message = _("{user} just signed up for Zulip. (total: {user_count})").format(
                user=f"@_**{user_profile.full_name}|{user_profile.id}**",
                user_count=user_count
            )
            internal_send_stream_message(
                user_profile.realm,
                sender,
                signup_notifications_stream,
                _("signups"),
                message
            )
    # We also send a notification to the Zulip administrative realm
    admin_realm = sender.realm
    try:
        # Check whether the stream exists
        signups_stream = get_signups_stream(admin_realm)
        with override_language(admin_realm.default_language):
            # We intentionally use the same strings as above to avoid translation burden.
            message = _("{user} just signed up for Zulip. (total: {user_count})").format(
                user=f"{user_profile.full_name} <`{user_profile.email}`>",
                user_count=user_count
            )
            internal_send_stream_message(
                admin_realm,
                sender,
                signups_stream,
                user_profile.realm.display_subdomain,
                message
            )
    except Stream.DoesNotExist:
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
        pass
def notify_invites_changed(user_profile: UserProfile) -> None:
    """Tell the realm's admins (and admin bots) that the set of pending
    invitations changed, so open clients can refresh their invite UI."""
    admins = user_profile.realm.get_admin_users_and_bots()
    send_event(user_profile.realm,
               dict(type="invites_changed"),
               [admin.id for admin in admins])
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
    """Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
    streams, so you have something to look at in your home view once
    you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
    are marked unread.
    """
    one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
    # Only look at messages from the past week on non-private streams.
    recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
    recent_messages = Message.objects.filter(recipient_id__in=recipient_ids,
                                             date_sent__gt=one_week_ago).order_by("-id")
    # Newest ONBOARDING_TOTAL_MESSAGES ids, reordered oldest-first.
    message_ids_to_use = list(reversed(recent_messages.values_list(
        'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
    if len(message_ids_to_use) == 0:
        return
    # Handle the race condition where a message arrives between
    # bulk_add_subscriptions above and the Message query just above
    already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
                                                 user_profile=user_profile).values_list("message_id",
                                                                                        flat=True))
    # Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
    marked_unread = 0
    ums_to_create = []
    # Walk newest-to-oldest so the unread quota is spent on the most
    # recent messages; everything past the quota gets the read flag.
    for message_id in reversed(message_ids_to_use):
        if message_id in already_ids:
            continue
        um = UserMessage(user_profile=user_profile, message_id=message_id)
        if marked_unread < ONBOARDING_UNREAD_MESSAGES:
            marked_unread += 1
        else:
            um.flags = UserMessage.flags.read
        ums_to_create.append(um)
    # reversed() restores oldest-first order for insertion.
    UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
                           prereg_user: Optional[PreregistrationUser]=None,
                           newsletter_data: Optional[Mapping[str, str]]=None,
                           default_stream_groups: Sequence[DefaultStreamGroup]=[],
                           realm_creation: bool=False) -> None:
    """Run the post-creation onboarding pipeline for a new human user.

    NOTE(review): the mutable default [] for default_stream_groups is
    shared across calls; harmless here since it is only iterated, never
    mutated, but a tuple default would be safer.
    """
    mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
    if prereg_user is not None:
        prereg_user.status = confirmation_settings.STATUS_ACTIVE
        prereg_user.save(update_fields=['status'])
        streams = prereg_user.streams.all()
        acting_user: Optional[UserProfile] = prereg_user.referred_by
    else:
        streams = []
        acting_user = None
    # If the user's invitation didn't explicitly list some streams, we
    # add the default streams
    if len(streams) == 0:
        streams = get_default_subs(user_profile)
    # Add any streams from requested default stream groups, deduplicated.
    for default_stream_group in default_stream_groups:
        default_stream_group_streams = default_stream_group.streams.all()
        for stream in default_stream_group_streams:
            if stream not in streams:
                streams.append(stream)
    bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
    add_new_user_history(user_profile, streams)
    # mit_beta_users don't have a referred_by field
    if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
        # This is a cross-realm private message.
        with override_language(prereg_user.referred_by.default_language):
            internal_send_private_message(
                user_profile.realm,
                get_system_bot(settings.NOTIFICATION_BOT),
                prereg_user.referred_by,
                _("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>")
            )
    # Mark any other PreregistrationUsers that are STATUS_ACTIVE as
    # inactive so we can keep track of the PreregistrationUser we
    # actually used for analytics
    if prereg_user is not None:
        PreregistrationUser.objects.filter(
            email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\
            .update(status=confirmation_settings.STATUS_REVOKED)
        if prereg_user.referred_by is not None:
            notify_invites_changed(user_profile)
    else:
        PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\
            .update(status=confirmation_settings.STATUS_REVOKED)
    notify_new_user(user_profile)
    # Clear any scheduled invitation emails to prevent them
    # from being sent after the user is created.
    clear_scheduled_invitation_emails(user_profile.delivery_email)
    if user_profile.realm.send_welcome_emails:
        enqueue_welcome_emails(user_profile, realm_creation)
    # We have an import loop here; it's intentional, because we want
    # to keep all the onboarding code in zerver/lib/onboarding.py.
    from zerver.lib.onboarding import send_initial_pms
    send_initial_pms(user_profile)
    if newsletter_data is not None:
        # If the user was created automatically via the API, we may
        # not want to register them for the newsletter
        queue_json_publish(
            "signups",
            {
                'email_address': user_profile.delivery_email,
                'user_id': user_profile.id,
                'merge_fields': {
                    'NAME': user_profile.full_name,
                    'REALM_ID': user_profile.realm_id,
                    'OPTIN_IP': newsletter_data["IP"],
                    'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
                },
            },
            lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
    """Send the realm_user/add event announcing a newly created user to
    every active user in the realm."""
    user_row = user_profile_to_user_row(user_profile)
    person = format_user_row(user_profile.realm, user_profile, user_row,
                             # Since we don't know what the client
                             # supports at this point in the code, we
                             # just assume client_gravatar and
                             # user_avatar_url_field_optional = False :(
                             client_gravatar=False,
                             user_avatar_url_field_optional=False,
                             # We assume there's no custom profile
                             # field data for a new user; initial
                             # values are expected to be added in a
                             # later event.
                             custom_profile_field_data={})
    event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
    """Build the realm_bot/add event payload describing a bot."""
    def name_of(stream: Optional[Stream]) -> Optional[str]:
        # None streams map to None names.
        return stream.name if stream else None
    bot = dict(
        email=user_profile.email,
        user_id=user_profile.id,
        full_name=user_profile.full_name,
        bot_type=user_profile.bot_type,
        is_active=user_profile.is_active,
        api_key=get_api_key(user_profile),
        default_sending_stream=name_of(user_profile.default_sending_stream),
        default_events_register_stream=name_of(user_profile.default_events_register_stream),
        default_all_public_streams=user_profile.default_all_public_streams,
        avatar_url=avatar_url(user_profile),
        services=get_service_dicts_for_bot(user_profile.id),
    )
    # Set the owner key only when the bot has an owner.
    # The default bots don't have an owner. So don't
    # set the owner key while reactivating them.
    if user_profile.bot_owner is not None:
        bot['owner_id'] = user_profile.bot_owner.id
    return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
    """Broadcast the bot-creation event to the bot's interested users."""
    send_event(user_profile.realm,
               created_bot_event(user_profile),
               bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
    """Bulk-create active users (or bots, when bot_type is given) from
    (full_name, email) pairs."""
    user_set = {(email, full_name, True) for full_name, email in name_list}
    bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
                   bot_type: Optional[int]=None, role: Optional[int]=None,
                   bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
                   timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
                   default_sending_stream: Optional[Stream]=None,
                   default_events_register_stream: Optional[Stream]=None,
                   default_all_public_streams: Optional[bool]=None,
                   prereg_user: Optional[PreregistrationUser]=None,
                   newsletter_data: Optional[Dict[str, str]]=None,
                   default_stream_groups: Sequence[DefaultStreamGroup]=[],
                   source_profile: Optional[UserProfile]=None,
                   realm_creation: bool=False,
                   acting_user: Optional[UserProfile]=None) -> UserProfile:
    """Create a user (human or bot), write the USER_CREATED audit-log row,
    count the creation for analytics/billing, notify clients, and — for
    humans only — run the full onboarding pipeline
    (process_new_human_user). Returns the new UserProfile."""
    user_profile = create_user(email=email, password=password, realm=realm,
                               full_name=full_name,
                               role=role, bot_type=bot_type, bot_owner=bot_owner,
                               tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
                               default_sending_stream=default_sending_stream,
                               default_events_register_stream=default_events_register_stream,
                               default_all_public_streams=default_all_public_streams,
                               source_profile=source_profile)
    event_time = user_profile.date_joined
    # Self-creation (e.g. signup) is audited with the new user as the actor.
    if not acting_user:
        acting_user = user_profile
    RealmAuditLog.objects.create(
        realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile,
        event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    # Note that for bots, the caller will send an additional event
    # with bot-specific info like services.
    notify_created_user(user_profile)
    if bot_type is None:
        process_new_human_user(user_profile, prereg_user=prereg_user,
                               newsletter_data=newsletter_data,
                               default_stream_groups=default_stream_groups,
                               realm_creation=realm_creation)
    return user_profile
def do_activate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
    """Activate an account as a real user: invalidates the password,
    clears the mirror-dummy flag, resets date_joined to now, records the
    current TOS version, writes a USER_ACTIVATED audit-log row, counts
    the activation for analytics/billing, and notifies clients."""
    user_profile.is_active = True
    user_profile.is_mirror_dummy = False
    user_profile.set_unusable_password()
    user_profile.date_joined = timezone_now()
    user_profile.tos_version = settings.TOS_VERSION
    user_profile.save(update_fields=["is_active", "date_joined", "password",
                                     "is_mirror_dummy", "tos_version"])
    event_time = user_profile.date_joined
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
    """Reactivate an existing, previously deactivated user, preserving
    their password and other settings; audit-log, count for
    analytics/billing, and notify clients (including the bot event when
    the user is a bot)."""
    # Unlike do_activate_user, this is meant for re-activating existing users,
    # so it doesn't reset their password, etc.
    user_profile.is_active = True
    user_profile.save(update_fields=["is_active"])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    notify_created_user(user_profile)
    if user_profile.is_bot:
        notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
    """QuerySet of every active, non-bot user in the realm."""
    humans = UserProfile.objects.filter(realm=realm, is_bot=False)
    return humans.filter(is_active=True)
def do_set_realm_property(realm: Realm, name: str, value: Any,
                          acting_user: Optional[UserProfile] = None) -> None:
    """Takes in a realm object, the name of an attribute to update, the
    value to update and the user who initiated the update.
    """
    property_type = Realm.property_types[name]
    assert isinstance(value, property_type), (
        f'Cannot update {name}: {value} is not an instance of {property_type}')
    old_value = getattr(realm, name)
    setattr(realm, name, value)
    realm.save(update_fields=[name])
    # Notify all active clients of the changed property.
    event = dict(
        type='realm',
        op='update',
        property=name,
        value=value,
    )
    send_event(realm, event, active_user_ids(realm.id))
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=event_time,
        acting_user=acting_user, extra_data=ujson.dumps({
            RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value},
            RealmAuditLog.NEW_VALUE: {'property': name, 'value': value}
        }))
    if name == "email_address_visibility":
        if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so
            # changes between values that will not require changing
            # that field, so we can save work and return here.
            return
        # Recompute each human user's visible email address for the
        # new visibility policy.
        user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
        for user_profile in user_profiles:
            user_profile.email = get_display_email_address(user_profile, realm)
            # TODO: Design a bulk event for this or force-reload all clients
            send_user_email_update_event(user_profile)
        UserProfile.objects.bulk_update(user_profiles, ['email'])
        # bulk_update skips signals, so flush the per-user caches by hand.
        for user_profile in user_profiles:
            flush_user_profile(sender=UserProfile, instance=user_profile)
def do_set_realm_authentication_methods(realm: Realm,
                                        authentication_methods: Dict[str, bool],
                                        acting_user: Optional[UserProfile]=None) -> None:
    """Enable or disable the given authentication backends on the realm,
    recording an audit-log entry and notifying active clients."""
    old_value = realm.authentication_methods_dict()
    for backend_name, enabled in authentication_methods.items():
        bit_index = getattr(realm.authentication_methods, backend_name).number
        realm.authentication_methods.set_bit(bit_index, int(enabled))
    realm.save(update_fields=['authentication_methods'])
    updated_value = realm.authentication_methods_dict()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(),
        acting_user=acting_user, extra_data=ujson.dumps({
            RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value},
            RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value}
        }))
    send_event(
        realm,
        dict(
            type="realm",
            op="update_dict",
            property='default',
            data=dict(authentication_methods=updated_value),
        ),
        active_user_ids(realm.id),
    )
def do_set_realm_message_editing(realm: Realm,
                                 allow_message_editing: bool,
                                 message_content_edit_limit_seconds: int,
                                 allow_community_topic_editing: bool) -> None:
    """Update the realm's message-editing settings and broadcast the change
    to all active users."""
    realm.allow_message_editing = allow_message_editing
    realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
    realm.allow_community_topic_editing = allow_community_topic_editing
    changed_fields = [
        'allow_message_editing',
        'allow_community_topic_editing',
        'message_content_edit_limit_seconds',
    ]
    realm.save(update_fields=changed_fields)
    update_event = dict(
        type="realm",
        op="update_dict",
        property="default",
        data=dict(
            allow_message_editing=allow_message_editing,
            message_content_edit_limit_seconds=message_content_edit_limit_seconds,
            allow_community_topic_editing=allow_community_topic_editing,
        ),
    )
    send_event(realm, update_event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
                                  message_content_delete_limit_seconds: int) -> None:
    """Update the realm's message-deletion time limit and notify clients."""
    realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
    realm.save(update_fields=['message_content_delete_limit_seconds'])
    send_event(
        realm,
        dict(
            type="realm",
            op="update_dict",
            property="default",
            data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
        ),
        active_user_ids(realm.id),
    )
def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None:
    """Set the realm's notifications stream and broadcast the new stream id."""
    realm.notifications_stream = stream
    realm.save(update_fields=['notifications_stream'])
    update_event = dict(
        type="realm",
        op="update",
        property="notifications_stream_id",
        value=stream_id,
    )
    send_event(realm, update_event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream],
                                             stream_id: int) -> None:
    """Set the realm's signup-notifications stream and broadcast the new id."""
    realm.signup_notifications_stream = stream
    realm.save(update_fields=['signup_notifications_stream'])
    update_event = dict(
        type="realm",
        op="update",
        property="signup_notifications_stream_id",
        value=stream_id,
    )
    send_event(realm, update_event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
    """
    Deactivate this realm. Do NOT deactivate the users -- we need to be able to
    tell the difference between users that were intentionally deactivated,
    e.g. by a realm admin, and users who can't currently use Zulip because their
    realm has been deactivated.
    """
    # Idempotent: a second call on an already-deactivated realm is a no-op.
    if realm.deactivated:
        return
    realm.deactivated = True
    realm.save(update_fields=["deactivated"])
    # Immediately end any paid plan for the realm.
    if settings.BILLING_ENABLED:
        downgrade_now(realm)
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
        acting_user=acting_user, extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
        }))
    # Cancel any queued outgoing emails for this realm.
    ScheduledEmail.objects.filter(realm=realm).delete()
    for user in active_humans_in_realm(realm):
        # Don't deactivate the users, but do delete their sessions so they get
        # bumped to the login screen, where they'll get a realm deactivation
        # notice when they try to log in.
        delete_user_sessions(user)
    # Notify connected clients so they can drop their realm state.
    event = dict(type="realm", op="deactivated",
                 realm_id=realm.id)
    send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
    """Reactivate a previously deactivated realm, recording an audit-log entry."""
    realm.deactivated = False
    realm.save(update_fields=["deactivated"])
    RealmAuditLog.objects.create(
        realm=realm,
        event_type=RealmAuditLog.REALM_REACTIVATED,
        event_time=timezone_now(),
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
        }),
    )
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
    """Move the realm to a new subdomain by rewriting its string_id."""
    realm.string_id = new_subdomain
    realm.save(update_fields=["string_id"])
def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
    """Destructively scrub personal data from a realm: for every user, delete
    their sent messages and avatar, and replace their name and email addresses
    with random placeholder values; then delete the realm's custom profile
    fields and attachments, and record an audit-log entry.
    """
    users = UserProfile.objects.filter(realm=realm)
    for user in users:
        # Remove message content and avatar before scrubbing identity fields.
        do_delete_messages_by_sender(user)
        do_delete_avatar_image(user, acting_user=acting_user)
        user.full_name = f"Scrubbed {generate_key()[:15]}"
        # Use a random placeholder address so the original email is unrecoverable.
        scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
        user.email = scrubbed_email
        user.delivery_email = scrubbed_email
        user.save(update_fields=["full_name", "email", "delivery_email"])
    do_remove_realm_custom_profile_fields(realm)
    Attachment.objects.filter(realm=realm).delete()
    RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
                                 acting_user=acting_user,
                                 event_type=RealmAuditLog.REALM_SCRUBBED)
def do_deactivate_user(user_profile: UserProfile,
                       acting_user: Optional[UserProfile]=None,
                       _cascade: bool=True) -> None:
    """Deactivate a user: mark them inactive, delete their sessions, cancel
    their scheduled emails, record an audit-log entry, update analytics and
    billing counters, and notify clients.

    If _cascade is True (the default), also deactivate any active bots
    owned by this user (without further cascading).
    """
    # Idempotent: nothing to do for an already-deactivated user.
    if not user_profile.is_active:
        return
    if user_profile.realm.is_zephyr_mirror_realm:  # nocoverage
        # For zephyr mirror users, we need to make them a mirror dummy
        # again; otherwise, other users won't get the correct behavior
        # when trying to send messages to this person inside Zulip.
        #
        # Ideally, we need to also ensure their zephyr mirroring bot
        # isn't running, but that's a separate issue.
        user_profile.is_mirror_dummy = True
        # Bug fix: previously is_mirror_dummy was set here but never
        # persisted, because the save() below only listed is_active in
        # update_fields.
        user_profile.save(update_fields=["is_mirror_dummy"])
    user_profile.is_active = False
    user_profile.save(update_fields=["is_active"])
    # Log the user out everywhere and stop pending emails to them.
    delete_user_sessions(user_profile)
    clear_scheduled_emails([user_profile.id])
    event_time = timezone_now()
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
        extra_data=ujson.dumps({
            RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
        }))
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
                              user_profile.is_bot, event_time, increment=-1)
    if settings.BILLING_ENABLED:
        update_license_ledger_if_needed(user_profile.realm, event_time)
    # Tell all active users this user was removed.
    event = dict(type="realm_user", op="remove",
                 person=dict(user_id=user_profile.id,
                             full_name=user_profile.full_name))
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
    if user_profile.is_bot:
        # Bot owners/admins additionally get a realm_bot removal event.
        event = dict(type="realm_bot", op="remove",
                     bot=dict(user_id=user_profile.id,
                              full_name=user_profile.full_name))
        send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
    if _cascade:
        bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
                                                  bot_owner=user_profile)
        for profile in bot_profiles:
            do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None:
    """Deactivate a stream: unsubscribe everybody, mark it deactivated and
    invite-only, rename it with a "!DEACTIVATED:" prefix to free the original
    name, remove it from default streams/groups, invalidate its cache entry,
    notify affected clients, and record an audit-log entry.

    NOTE(review): the `log` parameter is never referenced in this function —
    presumably kept for caller compatibility; confirm before removing.
    """
    # Get the affected user ids *before* we deactivate everybody.
    affected_user_ids = can_access_stream_user_ids(stream)
    get_active_subscriptions_for_stream_id(stream.id).update(active=False)
    was_invite_only = stream.invite_only
    stream.deactivated = True
    stream.invite_only = True
    # Preserve as much as possible the original stream name while giving it a
    # special prefix that both indicates that the stream is deactivated and
    # frees up the original name for reuse.
    old_name = stream.name
    new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
    for i in range(20):
        if stream_name_in_use(new_name, stream.realm_id):
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or you've hit a rename limit.
            new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
        else:
            break
    # If you don't have a unique name at this point, this will fail later in the
    # code path.
    stream.name = new_name[:Stream.MAX_NAME_LENGTH]
    stream.save(update_fields=['name', 'deactivated', 'invite_only'])
    # If this is a default stream, remove it, properly sending a
    # notification to browser clients.
    if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
        do_remove_default_stream(stream)
    default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
    for group in default_stream_groups_for_stream:
        do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
    # Remove the old stream information from remote cache.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    cache_delete(old_cache_key)
    # Clients are told about the stream under its original name/visibility.
    stream_dict = stream.to_dict()
    stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
    event = dict(type="stream", op="delete",
                 streams=[stream_dict])
    send_event(stream.realm, event, affected_user_ids)
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user,
                                 modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED,
                                 event_time=event_time)
def send_user_email_update_event(user_profile: UserProfile) -> None:
    """Notify all active users in the realm of this user's new public email."""
    person = dict(user_id=user_profile.id, new_email=user_profile.email)
    event = dict(type='realm_user', op='update', person=person)
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
    """Change a user's delivery (real) email address.

    Also updates the public .email field when the realm's email-visibility
    policy makes it realm-public, notifies the user (and clients) of the
    change, refreshes Gravatar-based avatars, and records an audit-log entry.
    """
    delete_user_profile_caches([user_profile])
    user_profile.delivery_email = new_email
    if user_profile.email_address_is_realm_public():
        # The public email mirrors the delivery email under this policy.
        user_profile.email = new_email
        user_profile.save(update_fields=["email", "delivery_email"])
    else:
        user_profile.save(update_fields=["delivery_email"])
    # We notify just the target user (and eventually org admins, only
    # when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
    # about their new delivery email, since that field is private.
    payload = dict(user_id=user_profile.id,
                   delivery_email=new_email)
    event = dict(type='realm_user', op='update', person=payload)
    send_event(user_profile.realm, event, [user_profile.id])
    if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
        # If the user is using Gravatar to manage their email address,
        # their Gravatar just changed, and we need to notify other
        # clients.
        notify_avatar_url_change(user_profile)
    if user_profile.email_address_is_realm_public():
        # Additionally, if we're also changing the publicly visible
        # email, we send a new_email event as well.
        send_user_email_update_event(user_profile)
    event_time = timezone_now()
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
                                 event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
    """Record a pending email change and send a confirmation link to the
    new address."""
    old_email = user_profile.delivery_email
    change_status = EmailChangeStatus.objects.create(
        new_email=new_email,
        old_email=old_email,
        user_profile=user_profile,
        realm=user_profile.realm,
    )
    activation_url = create_confirmation_link(change_status, Confirmation.EMAIL_CHANGE)
    from zerver.context_processors import common_context
    context = common_context(user_profile)
    context['old_email'] = old_email
    context['new_email'] = new_email
    context['activate_url'] = activation_url
    language = user_profile.default_language
    send_email('zerver/emails/confirm_new_email', to_emails=[new_email],
               from_name=FromAddress.security_email_from_name(language=language),
               from_address=FromAddress.tokenized_no_reply_address(),
               language=language, context=context,
               realm=user_profile.realm)
def compute_irc_user_fullname(email: str) -> str:
    """Derive a display name for an IRC mirror user from its email address."""
    local_part, _sep, _domain = email.partition("@")
    return f"{local_part} (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
    """Derive a display name for a Jabber/XMPP mirror user from its email."""
    local_part, _sep, _domain = email.partition("@")
    return f"{local_part} (XMPP)"
# Results are cached by email for a week (timeout=3600*24*7 seconds).
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
                timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
                                 email_to_fullname: Callable[[str], str]) -> UserProfile:
    """Return the user for this delivery email, creating an inactive
    mirror-dummy user (with a name derived via email_to_fullname) when
    no such user exists yet.
    """
    try:
        return get_user_by_delivery_email(email, realm)
    except UserProfile.DoesNotExist:
        try:
            # Forge a user for this person
            return create_user(
                email=email,
                password=None,
                realm=realm,
                full_name=email_to_fullname(email),
                active=False,
                is_mirror_dummy=True,
            )
        except IntegrityError:
            # Another process created the same user concurrently; fetch
            # the row it created.
            return get_user_by_delivery_email(email, realm)
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
    """Have Welcome Bot reply to a new user's private message.

    Only responds while Welcome Bot has sent fewer than two messages to
    this recipient, so the congratulation is sent once, for the user's
    first reply.
    """
    welcome_bot = get_system_bot(settings.WELCOME_BOT)
    human_recipient_id = message['message'].sender.recipient_id
    if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2:
        content = (
            _("Congratulations on your first reply!") +
            " "
            ":tada:"
            "\n"
            "\n" +
            _("Feel free to continue using this space to practice your new messaging "
              "skills. Or, try clicking on some of the stream names to your left!")
        )
        internal_send_private_message(
            message['realm'], welcome_bot, message['message'].sender, content)
def render_incoming_message(message: Message,
                            content: str,
                            user_ids: Set[int],
                            realm: Realm,
                            mention_data: Optional[MentionData]=None,
                            email_gateway: bool=False) -> str:
    """Render message content to HTML, converting renderer failures into a
    JsonableError for the API layer."""
    alert_words_automaton = get_alert_word_automaton(realm)
    try:
        return render_markdown(
            message=message,
            content=content,
            realm=realm,
            realm_alert_words_automaton=alert_words_automaton,
            mention_data=mention_data,
            email_gateway=email_gateway,
        )
    except MarkdownRenderingException:
        raise JsonableError(_('Unable to render message'))
class RecipientInfoResult(TypedDict):
    """Typed schema for the dict returned by get_recipient_info."""
    # Active users who are direct recipients of the message.
    active_user_ids: Set[int]
    # Recipients with enable_online_push_notifications set.
    push_notify_user_ids: Set[int]
    # Subscribers whose effective stream email-notification setting is on.
    stream_email_user_ids: Set[int]
    # Subscribers whose effective stream push-notification setting is on.
    stream_push_user_ids: Set[int]
    # Subscribers whose effective wildcard_mentions_notify setting is on.
    wildcard_mention_user_ids: Set[int]
    # Recipients eligible for UserMessage rows (i.e. not service bots).
    um_eligible_user_ids: Set[int]
    # Recipients flagged long_term_idle (soft-deactivated).
    long_term_idle_user_ids: Set[int]
    # Recipient/mentioned bots with bot_type == DEFAULT_BOT.
    default_bot_user_ids: Set[int]
    # (user_id, bot_type) pairs for service bots among the fetched users.
    service_bot_tuples: List[Tuple[int, int]]
def get_recipient_info(recipient: Recipient,
                       sender_id: int,
                       stream_topic: Optional[StreamTopicTarget],
                       possibly_mentioned_user_ids: AbstractSet[int]=set(),
                       possible_wildcard_mention: bool=True) -> RecipientInfoResult:
    """Compute the sets of user ids relevant to delivering a message to
    `recipient`: active recipients, notification targets, UserMessage
    eligibility, idle users, and bot bookkeeping.

    stream_topic must be supplied for stream recipients; raises
    ValueError for an unknown recipient type.
    """
    stream_push_user_ids: Set[int] = set()
    stream_email_user_ids: Set[int] = set()
    wildcard_mention_user_ids: Set[int] = set()
    if recipient.type == Recipient.PERSONAL:
        # The sender and recipient may be the same id, so
        # de-duplicate using a set.
        message_to_user_ids = list({recipient.type_id, sender_id})
        assert(len(message_to_user_ids) in [1, 2])
    elif recipient.type == Recipient.STREAM:
        # Anybody calling us w/r/t a stream message needs to supply
        # stream_topic. We may eventually want to have different versions
        # of this function for different message types.
        assert(stream_topic is not None)
        user_ids_muting_topic = stream_topic.user_ids_muting_topic()
        # Fetch per-subscription notification settings alongside the
        # subscriber's profile-level defaults, in one query.
        subscription_rows = stream_topic.get_active_subscriptions().annotate(
            user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
            user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
            user_profile_wildcard_mentions_notify=F(
                'user_profile__wildcard_mentions_notify'),
        ).values(
            'user_profile_id',
            'push_notifications',
            'email_notifications',
            'wildcard_mentions_notify',
            'user_profile_email_notifications',
            'user_profile_push_notifications',
            'user_profile_wildcard_mentions_notify',
            'is_muted',
        ).order_by('user_profile_id')
        message_to_user_ids = [
            row['user_profile_id']
            for row in subscription_rows
        ]
        def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # This implements the structure that the UserProfile stream notification settings
            # are defaults, which can be overridden by the stream-level settings (if those
            # values are not null).
            if row['is_muted']:
                return False
            if row['user_profile_id'] in user_ids_muting_topic:
                return False
            if row[setting] is not None:
                return row[setting]
            return row['user_profile_' + setting]
        stream_push_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_push_notify
            if should_send('push_notifications', row)
        }
        stream_email_user_ids = {
            row['user_profile_id']
            for row in subscription_rows
            # Note: muting a stream overrides stream_email_notify
            if should_send('email_notifications', row)
        }
        if possible_wildcard_mention:
            # If there's a possible wildcard mention, we need to
            # determine which users would receive a wildcard mention
            # notification for this message should the message indeed
            # contain a wildcard mention.
            #
            # We don't have separate values for push/email
            # notifications here; at this stage, we're just
            # determining whether this wildcard mention should be
            # treated as a mention (and follow the user's mention
            # notification preferences) or a normal message.
            wildcard_mention_user_ids = {
                row['user_profile_id']
                for row in subscription_rows
                if should_send("wildcard_mentions_notify", row)
            }
    elif recipient.type == Recipient.HUDDLE:
        message_to_user_ids = get_huddle_user_ids(recipient)
    else:
        raise ValueError('Bad recipient type')
    message_to_user_id_set = set(message_to_user_ids)
    user_ids = set(message_to_user_id_set)
    # Important note: Because we haven't rendered markdown yet, we
    # don't yet know which of these possibly-mentioned users was
    # actually mentioned in the message (in other words, the
    # mention syntax might have been in a code block or otherwise
    # escaped). `get_ids_for` will filter these extra user rows
    # for our data structures not related to bots
    user_ids |= possibly_mentioned_user_ids
    if user_ids:
        query = UserProfile.objects.filter(
            is_active=True,
        ).values(
            'id',
            'enable_online_push_notifications',
            'is_bot',
            'bot_type',
            'long_term_idle',
        )
        # query_for_ids is fast highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages)
        query = query_for_ids(
            query=query,
            user_ids=sorted(list(user_ids)),
            field='id',
        )
        rows = list(query)
    else:
        # TODO: We should always have at least one user_id as a recipient
        #       of any message we send. Right now the exception to this
        #       rule is `notify_new_user`, which, at least in a possibly
        #       contrived test scenario, can attempt to send messages
        #       to an inactive bot. When we plug that hole, we can avoid
        #       this `else` clause and just `assert(user_ids)`.
        #
        # UPDATE: It's February 2020 (and a couple years after the above
        #         comment was written). We have simplified notify_new_user
        #         so that it should be a little easier to reason about.
        #         There is currently some cleanup to how we handle cross
        #         realm bots that is still under development. Once that
        #         effort is complete, we should be able to address this
        #         to-do.
        rows = []
    def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
        """Only includes users on the explicit message to line"""
        return {
            row['id']
            for row in rows
            if f(row)
        } & message_to_user_id_set
    def is_service_bot(row: Dict[str, Any]) -> bool:
        return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
    active_user_ids = get_ids_for(lambda r: True)
    push_notify_user_ids = get_ids_for(
        lambda r: r['enable_online_push_notifications'],
    )
    # Service bots don't get UserMessage rows.
    um_eligible_user_ids = get_ids_for(
        lambda r: not is_service_bot(r),
    )
    long_term_idle_user_ids = get_ids_for(
        lambda r: r['long_term_idle'],
    )
    # These two bot data structures need to filter from the full set
    # of users who either are receiving the message or might have been
    # mentioned in it, and so can't use get_ids_for.
    #
    # Further in the do_send_messages code path, once
    # `mentioned_user_ids` has been computed via markdown, we'll filter
    # these data structures for just those users who are either a
    # direct recipient or were mentioned; for now, we're just making
    # sure we have the data we need for that without extra database
    # queries.
    default_bot_user_ids = {
        row['id']
        for row in rows
        if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
    }
    service_bot_tuples = [
        (row['id'], row['bot_type'])
        for row in rows
        if is_service_bot(row)
    ]
    info: RecipientInfoResult = dict(
        active_user_ids=active_user_ids,
        push_notify_user_ids=push_notify_user_ids,
        stream_push_user_ids=stream_push_user_ids,
        stream_email_user_ids=stream_email_user_ids,
        wildcard_mention_user_ids=wildcard_mention_user_ids,
        um_eligible_user_ids=um_eligible_user_ids,
        long_term_idle_user_ids=long_term_idle_user_ids,
        default_bot_user_ids=default_bot_user_ids,
        service_bot_tuples=service_bot_tuples,
    )
    return info
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
                           mentioned_user_ids: Set[int], active_user_ids: Set[int],
                           recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
    """Build the queue events (keyed by queue name) that should be enqueued
    for service bots triggered by this message, either via a mention (stream
    messages) or by being a direct recipient (private messages).

    Returns an empty mapping when the sender is itself a bot.
    """
    event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
    # Avoid infinite loops by preventing messages sent by bots from generating
    # Service events.
    if sender.is_bot:
        return event_dict
    def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
        # Route to the queue matching this service bot's type.
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            queue_name = 'outgoing_webhooks'
        elif bot_type == UserProfile.EMBEDDED_BOT:
            queue_name = 'embedded_bots'
        else:
            logging.error(
                'Unexpected bot_type for Service bot id=%s: %s',
                user_profile_id, bot_type,
            )
            return
        is_stream = (recipient_type == Recipient.STREAM)
        # Important note: service_bot_tuples may contain service bots
        # who were not actually mentioned in the message (e.g. if
        # mention syntax for that bot appeared in a code block).
        # Thus, it is important to filter any users who aren't part of
        # either mentioned_user_ids (the actual mentioned users) or
        # active_user_ids (the actual recipients).
        #
        # So even though this is implied by the logic below, we filter
        # these not-actually-mentioned users here, to help keep this
        # function future-proof.
        if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
            return
        # Mention triggers, for stream messages
        if is_stream and user_profile_id in mentioned_user_ids:
            trigger = 'mention'
        # PM triggers for personal and huddle messages
        elif (not is_stream) and (user_profile_id in active_user_ids):
            trigger = 'private_message'
        else:
            return
        event_dict[queue_name].append({
            'trigger': trigger,
            'user_profile_id': user_profile_id,
        })
    for user_profile_id, bot_type in service_bot_tuples:
        maybe_add_event(
            user_profile_id=user_profile_id,
            bot_type=bot_type,
        )
    return event_dict
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
    """Persist a ScheduledMessage row for each prepared message dict and
    return the ids of the created rows."""
    rows: List[ScheduledMessage] = []
    for message in messages:
        row = ScheduledMessage()
        row.sender = message['message'].sender
        row.recipient = message['message'].recipient
        row.set_topic_name(topic_name=message['message'].topic_name())
        row.content = message['message'].content
        row.sending_client = message['message'].sending_client
        row.stream = message['stream']
        row.realm = message['realm']
        row.scheduled_timestamp = message['deliver_at']
        if message['delivery_type'] == 'send_later':
            row.delivery_type = ScheduledMessage.SEND_LATER
        elif message['delivery_type'] == 'remind':
            row.delivery_type = ScheduledMessage.REMIND
        rows.append(row)
    ScheduledMessage.objects.bulk_create(rows)
    return [row.id for row in rows]
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
                     email_gateway: bool=False,
                     mark_as_read: Sequence[int]=[]) -> List[int]:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.

    Pipeline: normalize/enrich each message dict, render markdown, save
    Message and UserMessage rows in one transaction, then send events and
    enqueue follow-up work. Returns the ids of all sent messages.

    NOTE(review): mark_as_read uses a mutable default ([]); it is only read
    here and in create_user_messages, so this is benign, but a None default
    would be safer — confirm before changing.
    """
    # Filter out messages which didn't pass internal_prep_message properly
    messages = [message for message in messages_maybe_none if message is not None]
    # Filter out zephyr mirror anomalies where the message was already sent
    already_sent_ids: List[int] = []
    new_messages: List[MutableMapping[str, Any]] = []
    for message in messages:
        # An int in the 'message' slot means "already sent, this is its id".
        if isinstance(message['message'], int):
            already_sent_ids.append(message['message'])
        else:
            new_messages.append(message)
    messages = new_messages
    links_for_embed: Set[str] = set()
    # For consistency, changes to the default values for these gets should also be applied
    # to the default args in do_send_message
    for message in messages:
        message['rendered_content'] = message.get('rendered_content', None)
        message['stream'] = message.get('stream', None)
        message['local_id'] = message.get('local_id', None)
        message['sender_queue_id'] = message.get('sender_queue_id', None)
        message['realm'] = message.get('realm', message['message'].sender.realm)
        mention_data = MentionData(
            realm_id=message['realm'].id,
            content=message['message'].content,
        )
        message['mention_data'] = mention_data
        if message['message'].is_stream_message():
            stream_id = message['message'].recipient.type_id
            stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=message['message'].topic_name(),
            )
        else:
            stream_topic = None
        info = get_recipient_info(
            recipient=message['message'].recipient,
            sender_id=message['message'].sender_id,
            stream_topic=stream_topic,
            possibly_mentioned_user_ids=mention_data.get_user_ids(),
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )
        message['active_user_ids'] = info['active_user_ids']
        message['push_notify_user_ids'] = info['push_notify_user_ids']
        message['stream_push_user_ids'] = info['stream_push_user_ids']
        message['stream_email_user_ids'] = info['stream_email_user_ids']
        message['um_eligible_user_ids'] = info['um_eligible_user_ids']
        message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
        message['default_bot_user_ids'] = info['default_bot_user_ids']
        message['service_bot_tuples'] = info['service_bot_tuples']
        # Render our messages.
        assert message['message'].rendered_content is None
        rendered_content = render_incoming_message(
            message['message'],
            message['message'].content,
            message['active_user_ids'],
            message['realm'],
            mention_data=message['mention_data'],
            email_gateway=email_gateway,
        )
        message['message'].rendered_content = rendered_content
        message['message'].rendered_content_version = markdown_version
        links_for_embed |= message['message'].links_for_preview
        # Add members of the mentioned user groups into `mentions_user_ids`.
        for group_id in message['message'].mentions_user_group_ids:
            members = message['mention_data'].get_group_members(group_id)
            message['message'].mentions_user_ids.update(members)
        # Only send data to Tornado about wildcard mentions if message
        # rendering determined the message had an actual wildcard
        # mention in it (and not e.g. wildcard mention syntax inside a
        # code block).
        if message['message'].mentions_wildcard:
            message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
        else:
            message['wildcard_mention_user_ids'] = []
        '''
        Once we have the actual list of mentioned ids from message
        rendering, we can patch in "default bots" (aka normal bots)
        who were directly mentioned in this message as eligible to
        get UserMessage rows.
        '''
        mentioned_user_ids = message['message'].mentions_user_ids
        default_bot_user_ids = message['default_bot_user_ids']
        mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
        message['um_eligible_user_ids'] |= mentioned_bot_user_ids
    # Save the message receipts in the database
    user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
    with transaction.atomic():
        Message.objects.bulk_create([message['message'] for message in messages])
        # Claim attachments in message
        for message in messages:
            if do_claim_attachments(message['message'],
                                    message['message'].potential_attachment_path_ids):
                message['message'].has_attachment = True
                message['message'].save(update_fields=['has_attachment'])
        ums: List[UserMessageLite] = []
        for message in messages:
            # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
            # they will be processed later.
            mentioned_user_ids = message['message'].mentions_user_ids
            user_messages = create_user_messages(
                message=message['message'],
                um_eligible_user_ids=message['um_eligible_user_ids'],
                long_term_idle_user_ids=message['long_term_idle_user_ids'],
                stream_push_user_ids = message['stream_push_user_ids'],
                stream_email_user_ids = message['stream_email_user_ids'],
                mentioned_user_ids=mentioned_user_ids,
                mark_as_read=mark_as_read,
            )
            for um in user_messages:
                user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
            ums.extend(user_messages)
            message['message'].service_queue_events = get_service_bot_events(
                sender=message['message'].sender,
                service_bot_tuples=message['service_bot_tuples'],
                mentioned_user_ids=mentioned_user_ids,
                active_user_ids=message['active_user_ids'],
                recipient_type=message['message'].recipient.type,
            )
        bulk_insert_ums(ums)
        for message in messages:
            do_widget_post_save_actions(message)
    for message in messages:
        realm_id: Optional[int] = None
        if message['message'].is_stream_message():
            if message['stream'] is None:
                stream_id = message['message'].recipient.type_id
                message['stream'] = Stream.objects.select_related().get(id=stream_id)
            assert message['stream'] is not None  # assert needed because stubs for django are missing
            realm_id = message['stream'].realm_id
        # Deliver events to the real-time push system, as well as
        # enqueuing any additional processing triggered by the message.
        wide_message_dict = MessageDict.wide_dict(message['message'], realm_id)
        user_flags = user_message_flags.get(message['message'].id, {})
        sender = message['message'].sender
        message_type = wide_message_dict['type']
        presence_idle_user_ids = get_active_presence_idle_user_ids(
            realm=sender.realm,
            sender_id=sender.id,
            message_type=message_type,
            active_user_ids=message['active_user_ids'],
            user_flags=user_flags,
        )
        event = dict(
            type='message',
            message=message['message'].id,
            message_dict=wide_message_dict,
            presence_idle_user_ids=presence_idle_user_ids,
        )
        '''
        TODO: We may want to limit user_ids to only those users who have
              UserMessage rows, if only for minor performance reasons.

              For now we queue events for all subscribers/sendees of the
              message, since downstream code may still do notifications
              that don't require UserMessage rows.

              Our automated tests have gotten better on this codepath,
              but we may have coverage gaps, so we should be careful
              about changing the next line.
        '''
        user_ids = message['active_user_ids'] | set(user_flags.keys())
        users = [
            dict(
                id=user_id,
                flags=user_flags.get(user_id, []),
                always_push_notify=(user_id in message['push_notify_user_ids']),
                stream_push_notify=(user_id in message['stream_push_user_ids']),
                stream_email_notify=(user_id in message['stream_email_user_ids']),
                wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
            )
            for user_id in user_ids
        ]
        if message['message'].is_stream_message():
            # Note: This is where authorization for single-stream
            # get_updates happens! We only attach stream data to the
            # notify new_message request if it's a public stream,
            # ensuring that in the tornado server, non-public stream
            # messages are only associated to their subscribed users.
            assert message['stream'] is not None  # assert needed because stubs for django are missing
            if message['stream'].is_public():
                event['realm_id'] = message['stream'].realm_id
                event['stream_name'] = message['stream'].name
            if message['stream'].invite_only:
                event['invite_only'] = True
            if message['stream'].first_message_id is None:
                message['stream'].first_message_id = message['message'].id
                message['stream'].save(update_fields=["first_message_id"])
        if message['local_id'] is not None:
            event['local_id'] = message['local_id']
        if message['sender_queue_id'] is not None:
            event['sender_queue_id'] = message['sender_queue_id']
        send_event(message['realm'], event, users)
        if links_for_embed:
            event_data = {
                'message_id': message['message'].id,
                'message_content': message['message'].content,
                'message_realm_id': message['realm'].id,
                'urls': links_for_embed}
            queue_json_publish('embed_links', event_data)
        if message['message'].recipient.type == Recipient.PERSONAL:
            welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
            if (welcome_bot_id in message['active_user_ids'] and
                    welcome_bot_id != message['message'].sender_id):
                send_welcome_bot_response(message)
        for queue_name, events in message['message'].service_queue_events.items():
            for event in events:
                queue_json_publish(
                    queue_name,
                    {
                        "message": wide_message_dict,
                        "trigger": event['trigger'],
                        "user_profile_id": event["user_profile_id"],
                    },
                )
    # Note that this does not preserve the order of message ids
    # returned. In practice, this shouldn't matter, as we only
    # mirror single zephyr messages at a time and don't otherwise
    # intermingle sending zephyr messages with other messages.
    return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
    """A lightweight stand-in for a zerver_usermessage row.

    The Django ORM is too slow for bulk operations.  This class is
    optimized for the simple use case of inserting a bunch of rows
    into zerver_usermessage.
    """

    def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
        self.user_profile_id = user_profile_id
        self.message_id = message_id
        self.flags = flags

    def flags_list(self) -> List[str]:
        # Delegate decoding of the flags bitmask to the real model class.
        return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
                         um_eligible_user_ids: AbstractSet[int],
                         long_term_idle_user_ids: AbstractSet[int],
                         stream_push_user_ids: AbstractSet[int],
                         stream_email_user_ids: AbstractSet[int],
                         mentioned_user_ids: AbstractSet[int],
                         mark_as_read: Sequence[int] = ()) -> List[UserMessageLite]:
    """Compute the UserMessageLite rows to be bulk-inserted for `message`.

    Builds one row per user in `um_eligible_user_ids`, sets the
    read/mentioned/alert-word/private flags, and then drops rows for
    long-term-idle users whose row would carry only default flags (the
    soft-reactivation logic recreates those lazily).

    `mark_as_read` lists user ids whose row should start out read.
    (Default changed from a mutable `[]` to an immutable `()`; it is
    only membership-tested, so this is behavior-identical.)
    """
    ums_to_create = [
        UserMessageLite(
            user_profile_id=user_profile_id,
            message_id=message.id,
            flags=0,
        )
        for user_profile_id in um_eligible_user_ids
    ]

    # These properties on the Message are set via
    # render_markdown by code in the markdown inline patterns
    wildcard = message.mentions_wildcard
    ids_with_alert_words = message.user_ids_with_alert_words

    # Hoist loop-invariant work: O(1) membership for mark_as_read and a
    # single computation of whether this is a private message.
    mark_as_read_ids = set(mark_as_read)
    is_private_message = message.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL)

    for um in ums_to_create:
        if (um.user_profile_id == message.sender.id and
                message.sent_by_human()) or \
                um.user_profile_id in mark_as_read_ids:
            um.flags |= UserMessage.flags.read
        if wildcard:
            um.flags |= UserMessage.flags.wildcard_mentioned
        if um.user_profile_id in mentioned_user_ids:
            um.flags |= UserMessage.flags.mentioned
        if um.user_profile_id in ids_with_alert_words:
            um.flags |= UserMessage.flags.has_alert_word
        if is_private_message:
            um.flags |= UserMessage.flags.is_private

    # For long_term_idle (aka soft-deactivated) users, we are allowed
    # to optimize by lazily not creating UserMessage rows that would
    # have the default 0 flag set (since the soft-reactivation logic
    # knows how to create those when the user comes back).  We need to
    # create the UserMessage rows for these long_term_idle users
    # non-lazily in a few cases:
    #
    # * There are nonzero flags (e.g. the user was mentioned), since
    #   that case is rare and this saves a lot of complexity in
    #   soft-reactivation.
    #
    # * If the user is going to be notified (e.g. they get push/email
    #   notifications for every message on a stream), since in that
    #   case the notifications code will call `access_message` on the
    #   message to re-verify permissions, and for private streams,
    #   will get an error if the UserMessage row doesn't exist yet.
    #
    # See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
    # for details on this system.
    user_messages = []
    for um in ums_to_create:
        if (um.user_profile_id in long_term_idle_user_ids and
                um.user_profile_id not in stream_push_user_ids and
                um.user_profile_id not in stream_email_user_ids and
                message.is_stream_message() and
                int(um.flags) == 0):
            continue
        user_messages.append(um)

    return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
    """Insert UserMessageLite rows with raw SQL, bypassing the ORM.

    Doing bulk inserts this way is much faster than using Django,
    since we don't have any ORM overhead.  Profiling with 1000
    users shows a speedup of 0.436 -> 0.027 seconds, so we're
    talking about a 15x speedup.
    """
    if not ums:
        return

    rows = [(row.user_profile_id, row.message_id, row.flags) for row in ums]

    insert_query = SQL('''
        INSERT into
            zerver_usermessage (user_profile_id, message_id, flags)
        VALUES %s
    ''')

    with connection.cursor() as cursor:
        execute_values(cursor.cursor, insert_query, rows)
def do_add_submessage(realm: Realm,
                      sender_id: int,
                      message_id: int,
                      msg_type: str,
                      content: str,
                      ) -> None:
    """Persist a SubMessage row and broadcast a 'submessage' event to
    everyone who received the parent message."""
    submessage = SubMessage(
        sender_id=sender_id,
        message_id=message_id,
        msg_type=msg_type,
        content=content,
    )
    submessage.save()

    # Notify everyone holding a UserMessage row for the parent message.
    notify_user_ids = [
        um.user_profile_id
        for um in UserMessage.objects.filter(message_id=message_id)
    ]
    event = {
        "type": "submessage",
        "msg_type": msg_type,
        "message_id": message_id,
        "submessage_id": submessage.id,
        "sender_id": sender_id,
        "content": content,
    }
    send_event(realm, event, notify_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
                           reaction: Reaction, op: str) -> None:
    """Send a 'reaction' event (op is "add" or "remove") for `message`
    and refresh the message cache."""
    event: Dict[str, Any] = {
        'type': 'reaction',
        'op': op,
        'user_id': user_profile.id,
        # TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing use user_id.  See
        # https://github.com/zulip/zulip/pull/14711 for details.
        'user': {
            'user_id': user_profile.id,
            'email': user_profile.email,
            'full_name': user_profile.full_name,
        },
        'message_id': message.id,
        'emoji_name': reaction.emoji_name,
        'emoji_code': reaction.emoji_code,
        'reaction_type': reaction.reaction_type,
    }

    # Update the cached message since new reaction is added.
    update_to_dict_cache([message])

    # Recipients for message update events, including reactions, are
    # everyone who got the original message.  This means reactions
    # won't live-update in preview narrows, but it's the right
    # performance tradeoff, since otherwise we'd need to send all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
    #
    # However, to ensure that reactions do live-update for any user
    # who has actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to message,
    # subscribing them to future notifications.
    ums = UserMessage.objects.filter(message=message.id)
    send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction(user_profile: UserProfile, message: Message,
                    emoji_name: str, emoji_code: str, reaction_type: str) -> None:
    """Save a new reaction row and notify recipients.

    Raises JsonableError if the reaction already exists (database-level
    uniqueness race).
    """
    new_reaction = Reaction(user_profile=user_profile, message=message,
                            emoji_name=emoji_name, emoji_code=emoji_code,
                            reaction_type=reaction_type)
    try:
        new_reaction.save()
    except django.db.utils.IntegrityError:  # nocoverage
        # This can happen when a race results in the check in views
        # code not catching an attempt to double-add a reaction, or
        # perhaps if the emoji_name/emoji_code mapping is busted.
        raise JsonableError(_("Reaction already exists."))

    notify_reaction_update(user_profile, message, new_reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
                       emoji_code: str, reaction_type: str) -> None:
    """Delete an existing reaction row and notify recipients."""
    # QuerySet.get(**kwargs) is equivalent to .filter(**kwargs).get().
    reaction = Reaction.objects.get(user_profile=user_profile,
                                    message=message,
                                    emoji_code=emoji_code,
                                    reaction_type=reaction_type)
    reaction.delete()

    notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
        realm: Realm,
        sender: UserProfile,
        recipient_user_profiles: List[UserProfile],
        operator: str) -> None:
    """Broadcast a typing start/stop event to the active recipients."""
    # Include a list of recipients in the event body to help identify
    # where the typing is happening.
    event = dict(
        type='typing',
        op=operator,
        sender={'user_id': sender.id, 'email': sender.email},
        recipients=[{'user_id': profile.id, 'email': profile.email}
                    for profile in recipient_user_profiles],
    )

    # Only deliver the notification to active user recipients
    send_event(realm, event,
               [user.id for user in recipient_user_profiles if user.is_active])
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile,
                                   user_ids: List[int],
                                   operator: str) -> None:
    """Check and send a typing notification.

    Validates `operator` and the recipient `user_ids` (rejecting the
    whole request if any id is invalid), then fans the event out via
    do_send_typing_notification.  Raises JsonableError on bad input.
    """
    realm = sender.realm

    if len(user_ids) == 0:
        raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
    elif operator not in ('start', 'stop'):
        raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))

    # The next chunk of code will go away when we upgrade old mobile
    # users away from versions of mobile that send emails.  For the
    # small number of very outdated mobile clients, we do double work
    # here in terms of fetching users, but this structure reduces lots
    # of other unnecessary duplicated code and will make it convenient
    # to mostly delete code when we desupport old versions of the app.

    # Fix: work on a copy so we don't mutate the caller's list when
    # appending the sender below.
    user_ids = list(user_ids)
    if sender.id not in user_ids:
        user_ids.append(sender.id)

    # If any of the user_ids being sent in are invalid, we will
    # just reject the whole request, since a partial list of user_ids
    # can create confusion related to huddles.  Plus it's a good
    # sign that a client is confused (or possibly even malicious) if
    # we get bad user_ids.
    user_profiles = []
    for user_id in user_ids:
        try:
            # We include cross-bot realms as possible recipients,
            # so that clients can know which huddle conversation
            # is relevant here.
            user_profile = get_user_by_id_in_realm_including_cross_realm(
                user_id, sender.realm)
        except UserProfile.DoesNotExist:
            raise JsonableError(_("Invalid user ID {}").format(user_id))
        user_profiles.append(user_profile)

    do_send_typing_notification(
        realm=realm,
        sender=sender,
        recipient_user_profiles=user_profiles,
        operator=operator,
    )
def ensure_stream(realm: Realm,
                  stream_name: str,
                  invite_only: bool=False,
                  stream_description: str="",
                  acting_user: Optional[UserProfile]=None) -> Stream:
    """Return the named stream in `realm`, creating it first if needed."""
    stream, created = create_stream_if_needed(realm, stream_name,
                                              invite_only=invite_only,
                                              stream_description=stream_description,
                                              acting_user=acting_user)
    return stream
def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
                                     forwarded_mirror_message: bool,
                                     forwarder_user_profile: Optional[UserProfile],
                                     sender: UserProfile) -> Recipient:
    """Resolve a set of PM recipients to a Recipient object.

    A conversation with exactly one other user resolves to that user's
    personal Recipient; anything larger becomes a huddle (which always
    includes the sender).  Raises ValidationError if a mirrored message
    is forwarded by someone who was not themselves a recipient.
    """
    # Avoid mutating the passed in list of recipient_profiles.
    recipient_profiles_map = {user_profile.id: user_profile
                              for user_profile in recipient_profiles}

    if forwarded_mirror_message:
        # In our mirroring integrations with some third-party
        # protocols, bots subscribed to the third-party protocol
        # forward to Zulip messages that they received in the
        # third-party service.  The permissions model for that
        # forwarding is that users can only submit to Zulip private
        # messages they personally received, and here we do the check
        # for whether forwarder_user_profile is among the private
        # message recipients of the message.
        assert forwarder_user_profile is not None
        if forwarder_user_profile.id not in recipient_profiles_map:
            raise ValidationError(_("User not authorized for this query"))

    # If the private message is just between the sender and
    # another person, force it to be a personal internally
    if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map):
        del recipient_profiles_map[sender.id]

    assert len(recipient_profiles_map) != 0
    if len(recipient_profiles_map) == 1:
        # Exactly one entry; unpack it instead of list(...)[0].
        [user_profile] = recipient_profiles_map.values()
        return user_profile.recipient

    # Otherwise, we need a huddle.  Make sure the sender is included in huddle messages
    recipient_profiles_map[sender.id] = sender

    # Iterating a dict yields its keys, so set() suffices here.
    user_ids: Set[int] = set(recipient_profiles_map)
    return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
                                     sender: UserProfile,
                                     allow_deactivated: bool=False) -> Sequence[UserProfile]:
    """Check that every recipient can receive messages and that all
    recipients share a realm; returns the deduplicated recipient list.

    Raises ValidationError for deactivated recipients (unless
    allow_deactivated) or cross-realm recipients (cross-realm bots are
    exempt from the realm check).
    """
    recipient_profiles_map: Dict[int, UserProfile] = {}

    # We exempt cross-realm bots from the check that all the recipients
    # are in the same realm.
    realms = set()
    if not is_cross_realm_bot_email(sender.email):
        realms.add(sender.realm_id)

    for user_profile in user_profiles:
        unusable = (not user_profile.is_active and not user_profile.is_mirror_dummy and
                    not allow_deactivated) or user_profile.realm.deactivated
        if unusable:
            raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email))
        recipient_profiles_map[user_profile.id] = user_profile
        if not is_cross_realm_bot_email(user_profile.email):
            realms.add(user_profile.realm_id)

    if len(realms) > 1:
        raise ValidationError(_("You can't send private messages outside of your organization."))

    return list(recipient_profiles_map.values())
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
                                forwarder_user_profile: Optional[UserProfile],
                                sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
    """Validate the recipient profiles, then resolve them to a Recipient."""
    valid_profiles = validate_recipient_user_profiles(
        user_profiles, sender, allow_deactivated=allow_deactivated)
    return get_recipient_from_user_profiles(
        valid_profiles, forwarded_mirror_message, forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
    """Return the id of an already-delivered copy of this mirrored
    message, or None if no duplicate exists."""
    # For huddle messages, we use a 10-second window because the
    # timestamps aren't guaranteed to actually match between two
    # copies of the same message.
    is_huddle = message.recipient.type == Recipient.HUDDLE
    time_window = datetime.timedelta(seconds=10 if is_huddle else 0)

    candidates = Message.objects.filter(
        sender=message.sender,
        recipient=message.recipient,
        content=message.content,
        sending_client=message.sending_client,
        date_sent__gte=message.date_sent - time_window,
        date_sent__lte=message.date_sent + time_window)
    matches = filter_by_exact_message_topic(
        query=candidates,
        message=message,
    )

    if matches.exists():
        return matches[0].id
    return None
def extract_stream_indicator(s: str) -> Union[str, int]:
    """Parse a client-supplied stream indicator into a name or an id.

    Users can pass stream name as either an id or a name,
    and if they choose to pass a name, they may JSON encode
    it for legacy reasons.
    """
    try:
        parsed = ujson.loads(s)
    except (ValueError, TypeError):
        # Not JSON at all: treat the value as a raw stream name.
        return s

    # We should stop supporting this odd use case
    # once we improve our documentation.
    if isinstance(parsed, list):
        if len(parsed) != 1:  # nocoverage
            raise JsonableError(_("Expected exactly one stream"))
        parsed = parsed[0]

    if isinstance(parsed, (str, int)):
        # Either a JSON-encoded stream name or a stream id.
        return parsed

    raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
    """Parse a PM recipient list into validated emails or user ids.

    We try to accept multiple incoming formats for recipients.
    See test_extract_recipients() for examples of what we allow.
    """
    try:
        data = ujson.loads(s)
    except (ValueError, TypeError):
        data = s

    # A plain (or JSON-encoded) string is a comma-separated email list.
    if isinstance(data, str):
        data = data.split(',')

    if not isinstance(data, list):
        raise JsonableError(_("Invalid data type for recipients"))

    if not data:
        # We don't complain about empty message recipients here
        return data

    # The first element decides whether we treat this as emails or ids;
    # the validators reject mixed lists.
    if isinstance(data[0], str):
        return get_validated_emails(data)
    if isinstance(data[0], int):
        return get_validated_user_ids(data)
    raise JsonableError(_("Invalid data type for recipients"))
def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]:
    """Deduplicate a list of recipient user ids.

    Raises JsonableError if any entry is not an int (i.e. the client
    mixed emails into an id list).
    """
    # Materialize first: a generator argument would otherwise be
    # exhausted by the validation loop, silently returning [].
    id_list = list(user_ids)
    for user_id in id_list:
        if not isinstance(user_id, int):
            raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))

    return list(set(id_list))
def get_validated_emails(emails: Iterable[str]) -> List[str]:
    """Strip, deduplicate, and drop empty entries from an email list.

    Raises JsonableError if any entry is not a str (i.e. the client
    mixed user ids into an email list).
    """
    # Materialize first: a generator argument would otherwise be
    # exhausted by the validation loop, silently returning [].
    email_list = list(emails)
    for email in email_list:
        if not isinstance(email, str):
            raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))

    return list(filter(bool, {email.strip() for email in email_list}))
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
                              topic: str, body: str, realm: Optional[Realm]=None) -> int:
    """Validate and send a stream message; returns the new message id."""
    addressee = Addressee.for_stream_name(stream_name, topic)
    checked = check_message(sender, client, addressee, body, realm)
    [message_id] = do_send_messages([checked])
    return message_id
def check_send_private_message(sender: UserProfile, client: Client,
                               receiving_user: UserProfile, body: str) -> int:
    """Validate and send a 1:1 private message; returns the new message id."""
    addressee = Addressee.for_user_profile(receiving_user)
    checked = check_message(sender, client, addressee, body)
    [message_id] = do_send_messages([checked])
    return message_id
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
                       message_to: Union[Sequence[int], Sequence[str]],
                       topic_name: Optional[str],
                       message_content: str, realm: Optional[Realm]=None,
                       forged: bool=False, forged_timestamp: Optional[float]=None,
                       forwarder_user_profile: Optional[UserProfile]=None,
                       local_id: Optional[str]=None,
                       sender_queue_id: Optional[str]=None,
                       widget_content: Optional[str]=None) -> int:
    """Validate and send a message; returns the id of the sent message.
    Has the same argspec as check_message."""
    addressee = Addressee.legacy_build(
        sender, message_type_name, message_to, topic_name)
    checked = check_message(sender, client, addressee,
                            message_content, realm, forged, forged_timestamp,
                            forwarder_user_profile, local_id, sender_queue_id,
                            widget_content)
    return do_send_messages([checked])[0]
def check_schedule_message(sender: UserProfile, client: Client,
                           message_type_name: str,
                           message_to: Union[Sequence[str], Sequence[int]],
                           topic_name: Optional[str], message_content: str,
                           delivery_type: str, deliver_at: datetime.datetime,
                           realm: Optional[Realm]=None,
                           forwarder_user_profile: Optional[UserProfile]=None,
                           ) -> int:
    """Validate a message and schedule it for later delivery; returns
    the id produced by do_schedule_messages."""
    addressee = Addressee.legacy_build(
        sender, message_type_name, message_to, topic_name)

    prepped = check_message(sender, client, addressee,
                            message_content, realm=realm,
                            forwarder_user_profile=forwarder_user_profile)
    prepped['deliver_at'] = deliver_at
    prepped['delivery_type'] = delivery_type

    # Reminders may only target streams, or the sender themselves.
    recipient = prepped['message'].recipient
    if (delivery_type == 'remind' and recipient.type != Recipient.STREAM and
            recipient.type_id != sender.id):
        raise JsonableError(_("Reminders can only be set for streams."))

    return do_schedule_messages([prepped])[0]
def check_default_stream_group_name(group_name: str) -> None:
    """Validate a default stream group name.

    Raises JsonableError if the name is blank, too long, or contains
    NULL (0x00) characters.
    """
    if group_name.strip() == "":
        raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
    if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
        raise JsonableError(_("Default stream group name too long (limit: {} characters)").format(
            DefaultStreamGroup.MAX_NAME_LENGTH,
        ))
    # `'\x00' in s` replaces the previous per-character ord() scan.
    if '\x00' in group_name:
        raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format(
            group_name,
        ))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
                                                   realm: Realm,
                                                   content: str) -> None:
    """
    Sends a PM error notification to a bot's owner if one hasn't already
    been sent in the last 5 minutes.
    """
    if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
        return
    if not sender.is_bot or sender.bot_owner is None:
        return

    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
    if sender.realm != realm:
        return

    # We warn the user once every 5 minutes to avoid a flood of
    # PMs on a misconfigured integration, re-using the
    # UserProfile.last_reminder field, which is not used for bots.
    previous_reminder = sender.last_reminder
    wait_period = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
    if previous_reminder and timezone_now() - previous_reminder <= wait_period:
        return

    internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
                                  sender.bot_owner, content)

    sender.last_reminder = timezone_now()
    sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
                            realm: Realm,
                            sender: UserProfile,
                            stream_name: Optional[str]=None,
                            stream_id: Optional[int]=None) -> None:
    """If a bot sends a message to a stream that doesn't exist or has no
    subscribers, sends a notification to the bot owner (if not a
    cross-realm bot) so that the owner can correct the issue."""
    if not sender.is_bot or sender.bot_owner is None:
        return

    format_args = {
        "bot_identity": f"`{sender.delivery_email}`",
        "stream_id": stream_id,
        "stream_name": f"#**{stream_name}**",
        "new_stream_link": "#streams/new",
    }
    if sender.bot_owner is not None:
        with override_language(sender.bot_owner.default_language):
            if stream is not None:
                # The stream exists; only complain if nobody subscribes.
                if num_subscribers_for_stream_id(stream.id) > 0:
                    return
                content = _("Your bot {bot_identity} tried to send a message to "
                            "stream {stream_name}. The stream exists but "
                            "does not have any subscribers.").format(**format_args)
            elif stream_id is not None:
                content = _("Your bot {bot_identity} tried to send a message to stream ID "
                            "{stream_id}, but there is no stream with that ID.").format(**format_args)
            else:
                assert(stream_name is not None)
                content = _("Your bot {bot_identity} tried to send a message to stream "
                            "{stream_name}, but that stream does not exist. "
                            "Click [here]({new_stream_link}) to create it.").format(**format_args)

    send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
                                              sender: UserProfile) -> Stream:
    """Look up a stream by name for sending, notifying a bot's owner if
    it is missing or empty; raises StreamDoesNotExistError if missing."""
    stream_name = stream_name.strip()
    check_stream_name(stream_name)

    try:
        stream = get_stream(stream_name, realm)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
        raise StreamDoesNotExistError(escape(stream_name))

    send_pm_if_empty_stream(stream, realm, sender)
    return stream
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
                                            sender: UserProfile) -> Stream:
    """Look up a stream by id for sending, notifying a bot's owner if it
    is missing or empty; raises StreamWithIDDoesNotExistError if missing."""
    try:
        stream = get_stream_by_id_in_realm(stream_id, realm)
    except Stream.DoesNotExist:
        send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
        raise StreamWithIDDoesNotExistError(stream_id)

    send_pm_if_empty_stream(stream, realm, sender)
    return stream
def check_private_message_policy(realm: Realm, sender: UserProfile,
                                 user_profiles: Sequence[UserProfile]) -> None:
    """Enforce the realm's private-message policy; raises JsonableError
    if PMs are disabled and this conversation isn't exempt."""
    if realm.private_message_policy != Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
        return
    # We allow PMs only between users and bots, to avoid
    # breaking the tutorial as well as automated
    # notifications from system bots to users.
    if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
        return
    raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
                  message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
                  forged_timestamp: Optional[float]=None,
                  forwarder_user_profile: Optional[UserProfile]=None,
                  local_id: Optional[str]=None,
                  sender_queue_id: Optional[str]=None,
                  widget_content: Optional[str]=None) -> Dict[str, Any]:
    """Validate a not-yet-sent message and return the dict of state
    ('message', 'stream', 'local_id', 'sender_queue_id', 'realm',
    'widget_content') that do_send_messages consumes.

    Raises JsonableError on invalid content, unauthorized recipients,
    or bad widget_content.  For zephyr mirroring, may instead return
    {'message': <int id>} if an identical message was already sent.

    See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.
    """
    stream = None
    # Normalize content: trailing whitespace is stripped, and the
    # result must be nonempty, null-byte free, and within length limits.
    message_content = message_content_raw.rstrip()
    if len(message_content) == 0:
        raise JsonableError(_("Message must not be empty"))
    if '\x00' in message_content:
        raise JsonableError(_("Message must not contain null bytes"))
    message_content = truncate_body(message_content)
    if realm is None:
        realm = sender.realm
    # Resolve the destination (stream by name/id/object, or PM
    # recipients) and enforce the relevant send permissions.
    if addressee.is_stream():
        topic_name = addressee.topic()
        topic_name = truncate_topic(topic_name)
        stream_name = addressee.stream_name()
        stream_id = addressee.stream_id()
        if stream_name is not None:
            stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
        elif stream_id is not None:
            stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
        else:
            stream = addressee.stream()
        assert stream is not None
        recipient = stream.recipient
        # This will raise JsonableError if there are problems.
        if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT:
            access_stream_for_send_message(
                sender=sender,
                stream=stream,
                forwarder_user_profile=forwarder_user_profile)
    elif addressee.is_private():
        user_profiles = addressee.user_profiles()
        mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
                                                    "jabber_mirror", "JabberMirror"]
        check_private_message_policy(realm, sender, user_profiles)
        # API Super-users who set the `forged` flag are allowed to
        # forge messages sent by any user, so we disable the
        # `forwarded_mirror_message` security check in that case.
        forwarded_mirror_message = mirror_message and not forged
        try:
            recipient = recipient_for_user_profiles(user_profiles,
                                                    forwarded_mirror_message,
                                                    forwarder_user_profile, sender)
        except ValidationError as e:
            assert isinstance(e.messages[0], str)
            raise JsonableError(e.messages[0])
    else:
        # This is defensive code--Addressee already validates
        # the message type.
        raise AssertionError("Invalid message type")
    # Build the (unsaved) Message object.
    message = Message()
    message.sender = sender
    message.content = message_content
    message.recipient = recipient
    if addressee.is_stream():
        message.set_topic_name(topic_name)
    if forged and forged_timestamp is not None:
        # Forged messages come with a timestamp
        message.date_sent = timestamp_to_datetime(forged_timestamp)
    else:
        message.date_sent = timezone_now()
    message.sending_client = client
    # We render messages later in the process.
    assert message.rendered_content is None
    # Zephyr mirroring can deliver the same message twice; short-circuit
    # with the existing message id if we already sent an identical copy.
    if client.name == "zephyr_mirror":
        id = already_sent_mirrored_message_id(message)
        if id is not None:
            return {'message': id}
    # widget_content arrives as a JSON string; parse and validate it.
    if widget_content is not None:
        try:
            widget_content = ujson.loads(widget_content)
        except Exception:
            raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
        try:
            check_widget_content(widget_content)
        except ValidationError as error:
            raise JsonableError(_('Widgets: {error_msg}').format(
                error_msg=error.message,
            ))
    return {'message': message, 'stream': stream, 'local_id': local_id,
            'sender_queue_id': sender_queue_id, 'realm': realm,
            'widget_content': widget_content}
def _internal_prep_message(realm: Realm,
                           sender: UserProfile,
                           addressee: Addressee,
                           content: str) -> Optional[Dict[str, Any]]:
    """
    Create a message object and checks it, but doesn't send it or save it to the database.
    The internal function that calls this can therefore batch send a bunch of created
    messages together as one database query.
    Call do_send_messages with a list of the return values of this method.

    Returns None (after logging) if check_message rejects the message.
    """
    # Truncate overlong content rather than erroring, since internal
    # senders (system bots) should not hard-fail on length.
    # NOTE(review): the 3900 cutoff is a magic constant that does not
    # obviously derive from MAX_MESSAGE_LENGTH -- confirm before unifying.
    if len(content) > MAX_MESSAGE_LENGTH:
        content = content[0:3900] + "\n\n[message was too long and has been truncated]"
    # If we have a stream name, and the stream doesn't exist, we
    # create it here (though this code path should probably be removed
    # eventually, moving that responsibility to the caller).  If
    # addressee.stream_name() is None (i.e. we're sending to a stream
    # by ID), we skip this, as the stream object must already exist.
    if addressee.is_stream():
        stream_name = addressee.stream_name()
        if stream_name is not None:
            ensure_stream(realm, stream_name, acting_user=sender)
    try:
        return check_message(sender, get_client("Internal"), addressee,
                             content, realm=realm)
    except JsonableError as e:
        # Internal messages are best-effort: log and drop on failure.
        logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
    return None
def internal_prep_stream_message(
        realm: Realm, sender: UserProfile,
        stream: Stream, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream(stream, topic),
        content=content,
    )
def internal_prep_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_stream_name(stream_name, topic),
        content=content,
    )
def internal_prep_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[Dict[str, Any]]:
    """
    See _internal_prep_message for details of how this works.
    """
    return _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_user_profile(recipient_user),
        content=content,
    )
def internal_send_private_message(realm: Realm,
                                  sender: UserProfile,
                                  recipient_user: UserProfile,
                                  content: str) -> Optional[int]:
    """Prep and send an internal PM; returns the message id, or None if
    the message could not be prepared."""
    prepped = internal_prep_private_message(realm, sender, recipient_user, content)
    if prepped is None:
        return None
    return do_send_messages([prepped])[0]
def internal_send_stream_message(
        realm: Realm,
        sender: UserProfile,
        stream: Stream,
        topic: str,
        content: str,
        email_gateway: bool=False) -> Optional[int]:
    """Prep and send an internal stream message; returns the message id,
    or None if the message could not be prepared."""
    prepped = internal_prep_stream_message(
        realm, sender, stream,
        topic, content,
    )
    if prepped is None:
        return None
    return do_send_messages([prepped], email_gateway=email_gateway)[0]
def internal_send_stream_message_by_name(
        realm: Realm, sender: UserProfile,
        stream_name: str, topic: str, content: str,
) -> Optional[int]:
    """Prep and send an internal stream message by stream name; returns
    the message id, or None if the message could not be prepared."""
    prepped = internal_prep_stream_message_by_name(
        realm, sender, stream_name,
        topic, content,
    )
    if prepped is None:
        return None
    return do_send_messages([prepped])[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
                                 content: str) -> Optional[int]:
    """Prep and send an internal huddle message; returns the message id,
    or None if the message could not be prepared."""
    prepped = _internal_prep_message(
        realm=realm,
        sender=sender,
        addressee=Addressee.for_private(emails, realm),
        content=content,
    )
    if prepped is None:
        return None
    return do_send_messages([prepped])[0]
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
    """Pick a stream color not used by the user's active subscriptions.

    These colors are shared with the palette in subs.js.  When every
    palette color is taken, wrap around based on the total count of
    used colors (duplicates included, matching the prior behavior).
    """
    used_colors = [sub.color for sub in subs if sub.active]
    # Set membership avoids O(palette * subs) scanning; keep the list so
    # the wrap-around fallback still indexes by total count.
    used_color_set = set(used_colors)
    available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_color_set]

    if available_colors:
        return available_colors[0]
    return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
                                        stream: Stream) -> None:
    """ Validates whether the user can view the subscribers of a stream.  Raises a JsonableError if:
        * The user and the stream are in different realms
        * The realm is MIT and the stream is not invite only.
        * The stream is invite only, requesting_user is passed, and that user
          does not subscribe to the stream.
    """
    def check_user_subscribed(user_profile: UserProfile) -> bool:
        # Defined as a callback so the subscription query only runs
        # if the helper actually needs it.
        return subscribed_to_stream(user_profile, stream.id)

    validate_user_access_to_subscribers_helper(
        user_profile,
        {"realm_id": stream.realm_id,
         "invite_only": stream.invite_only},
        check_user_subscribed)
def validate_user_access_to_subscribers_helper(
        user_profile: Optional[UserProfile],
        stream_dict: Mapping[str, Any],
        check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
    """Helper for validate_user_access_to_subscribers that doesn't require
    a full stream object.  This function is a bit hard to read,
    because it is carefully optimized for performance in the two code
    paths we call it from:

    * In `bulk_get_subscriber_user_ids`, we already know whether the
    user was subscribed via `sub_dict`, and so we want to avoid a
    database query at all (especially since it calls this in a loop);
    * In `validate_user_access_to_subscribers`, we want to only check
    if the user is subscribed when we absolutely have to, since it
    costs a database query.

    The `check_user_subscribed` argument is a function that reports
    whether the user is subscribed to the stream.

    `stream_dict` must provide at least "realm_id" and "invite_only".

    Note also that we raise a ValidationError in cases where the
    caller is doing the wrong thing (maybe these should be
    AssertionErrors), and JsonableError for 400 type errors.
    """
    if user_profile is None:
        raise ValidationError("Missing user to validate access for")

    if user_profile.realm_id != stream_dict["realm_id"]:
        raise ValidationError("Requesting user not in given realm")

    # Guest users can access subscribed public stream's subscribers
    if user_profile.is_guest:
        if check_user_subscribed(user_profile):
            return
        # We could put an AssertionError here; in that we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
        # (Unsubscribed guests fall through to the checks below.)

    if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
        raise JsonableError(_("Subscriber data is not available for this stream"))

    # Organization administrators can view subscribers for all streams.
    if user_profile.is_realm_admin:
        return

    if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)):
        raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
                                 user_profile: UserProfile,
                                 sub_dict: Mapping[int, bool],
                                 stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
    """Return {stream_id: [user_profile_id, ...]} for every stream in
    stream_dicts, restricted to streams whose subscribers user_profile
    is allowed to see (inaccessible streams map to an empty list).

    sub_dict maps stream_id => whether the user is subscribed to that stream."""
    target_stream_dicts = []
    for stream_dict in stream_dicts:
        # Record the stream_id -> recipient_id mapping for later lookups.
        stream_recipient.populate_with(stream_id=stream_dict["id"],
                                       recipient_id=stream_dict["recipient_id"])
        try:
            validate_user_access_to_subscribers_helper(
                user_profile,
                stream_dict,
                # sub_dict already knows the answer, so the access check
                # never needs a database query on this code path.
                lambda user_profile: sub_dict[stream_dict["id"]],
            )
        except JsonableError:
            # The user may not see this stream's subscribers; leave its
            # entry in `result` as the default empty list.
            continue
        target_stream_dicts.append(stream_dict)

    stream_ids = [stream['id'] for stream in target_stream_dicts]
    recipient_ids = sorted([
        stream_recipient.recipient_id_for(stream_id)
        for stream_id in stream_ids
    ])

    result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
    if not recipient_ids:
        return result

    '''
    The raw SQL below leads to more than a 2x speedup when tested with
    20k+ total subscribers.  (For large realms with lots of default
    streams, this function deals with LOTS of data, so it is important
    to optimize.)
    '''
    query = SQL('''
        SELECT
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        FROM
            zerver_subscription
        INNER JOIN zerver_userprofile ON
            zerver_userprofile.id = zerver_subscription.user_profile_id
        WHERE
            zerver_subscription.recipient_id in %(recipient_ids)s AND
            zerver_subscription.active AND
            zerver_userprofile.is_active
        ORDER BY
            zerver_subscription.recipient_id,
            zerver_subscription.user_profile_id
        ''')

    cursor = connection.cursor()
    cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
    rows = cursor.fetchall()
    cursor.close()

    recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()

    '''
    Using groupby/itemgetter here is important for performance, at scale.
    It makes it so that all interpreter overhead is just O(N) in nature.
    '''
    # NOTE: groupby relies on the ORDER BY recipient_id above keeping
    # rows for the same recipient adjacent.
    for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
        user_profile_ids = [r[1] for r in recip_rows]
        stream_id = recip_to_stream_id[recip_id]
        result[stream_id] = list(user_profile_ids)

    return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
    # TODO: Make a generic stub for QuerySet
    """Return a queryset over the stream's active subscriptions, raising a
    JsonableError if `requesting_user` may not view the subscriber list.

    'realm' is optional in stream.  The caller can refine this query with
    select_related(), values(), etc. depending on whether it wants objects
    or just certain fields.
    """
    validate_user_access_to_subscribers(requesting_user, stream)

    # Non-active users may still hold "active" subscriptions, so that we
    # can easily reactivate them with their old streams later; that is why
    # the query must also check the UserProfile.is_active flag.
    return get_active_subscriptions_for_stream_id(stream.id).filter(
        user_profile__is_active=True,
    )
def get_subscriber_emails(stream: Stream,
                          requesting_user: Optional[UserProfile]=None) -> List[str]:
    """Return the email addresses of the stream's active subscribers,
    enforcing the same access rules as get_subscribers_query."""
    rows = get_subscribers_query(stream, requesting_user).values('user_profile__email')
    return [row['user_profile__email'] for row in rows]
def notify_subscriptions_added(user_profile: UserProfile,
                               sub_pairs: Iterable[Tuple[Subscription, Stream]],
                               stream_user_ids: Callable[[Stream], List[int]],
                               recent_traffic: Dict[int, int],
                               no_log: bool=False) -> None:
    """Send the subscribing user a subscription/add event describing each
    (subscription, stream) pair, and (unless no_log) record a log entry.

    stream_user_ids computes the subscriber-id list to embed for a stream.
    """
    if not no_log:
        log_event({'type': 'subscription_added',
                   'user': user_profile.email,
                   'names': [stream.name for sub, stream in sub_pairs],
                   'realm': user_profile.realm.string_id})

    payloads = []
    for (subscription, stream) in sub_pairs:
        payload = stream.to_dict()
        for field_name in Subscription.API_FIELDS:
            # The "active" field is implied by context, so skip it.
            if field_name == "active":
                continue
            payload[field_name] = getattr(subscription, field_name)
        payload['in_home_view'] = not subscription.is_muted
        payload['email_address'] = encode_email_address(stream, show_sender=True)
        payload['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
            stream.id, stream.date_created, recent_traffic)
        payload['subscribers'] = stream_user_ids(stream)
        payloads.append(payload)

    # Notify the user who was just subscribed.
    send_event(user_profile.realm,
               dict(type="subscription", op="add", subscriptions=payloads),
               [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
                                        altered_user_ids: Iterable[int],
                                        subscribed_user_ids: Iterable[int]) -> Set[int]:
    """Compute which bystanders to notify about a subscription change.

    altered_user_ids is the user_ids that we are adding/removing;
    subscribed_user_ids is the already-subscribed user_ids.  Based on
    stream policy, we notify the correct bystanders, while not notifying
    altered_users (who get subscribers via another event).
    """
    excluded = set(altered_user_ids)

    if stream.invite_only:
        # PRIVATE STREAMS: realm admins can access all private stream
        # subscribers, so they get an event even when not subscribed.
        admin_ids = {user.id for user in stream.realm.get_admin_users_and_bots()}
        return (admin_ids | set(subscribed_user_ids)) - excluded

    # PUBLIC STREAMS: we now do "peer_add" or "peer_remove" events even for
    # streams users were never subscribed to, in order for the
    # neversubscribed structure to stay up-to-date.
    return set(active_non_guest_user_ids(stream.realm_id)) - excluded
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
    """Return {stream_id: [user_profile_id, ...]} of the active subscribers
    (with active user accounts) for each of the given streams."""
    all_subs = get_active_subscriptions_for_stream_ids(
        [stream.id for stream in streams],
    ).filter(
        user_profile__is_active=True,
    ).values(
        'recipient__type_id',
        'user_profile_id',
    ).order_by(
        'recipient__type_id',
    )

    # recipient__type_id is the stream id for stream recipients.
    subscribers_by_stream: Dict[int, List[int]] = defaultdict(list)
    for row in all_subs:
        subscribers_by_stream[row['recipient__type_id']].append(row['user_profile_id'])
    return subscribers_by_stream
def get_last_message_id() -> int:
    """Return the system-wide maximum Message id, or -1 when there are no
    messages yet (e.g. during initial realm creation).

    We generally use this function to populate RealmAuditLog; the max id
    here is actually systemwide, not per-realm.  Presumably there's some
    advantage in not filtering by realm.
    """
    max_id = Message.objects.aggregate(Max('id'))['id__max']
    # `aggregate` yields None on an empty table; use -1 as "beginning of time".
    return -1 if max_id is None else max_id
# Return shape of bulk_add_subscriptions:
# (newly-subscribed (user, stream) pairs, already-subscribed pairs).
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
                           users: Iterable[UserProfile],
                           color_map: Mapping[str, str]={},
                           from_stream_creation: bool=False,
                           acting_user: Optional[UserProfile]=None) -> SubT:
    """Subscribe each of `users` to each of `streams`, creating or
    reactivating Subscription rows in bulk, writing RealmAuditLog entries,
    and sending the various client events (occupy, stream creation,
    subscription add, peer_add).

    color_map optionally maps stream name -> stream color for new
    subscriptions; otherwise a color is picked automatically.

    Returns (new_subscriptions, already_subscribed), each a list of
    (user_profile, stream) pairs.
    """
    # NOTE(review): `color_map={}` is a shared mutable default; it is safe
    # here only because color_map is read, never mutated, in this function.
    users = list(users)

    # Lookup tables between streams and their recipient ids.
    recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams}
    recipient_ids: List[int] = [recipient_id for recipient_id in recipients_map.values()]

    stream_map: Dict[int, Stream] = {}
    for stream in streams:
        stream_map[recipients_map[stream.id]] = stream

    # All existing subscriptions (active or not) of these users, per user.
    subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
    all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
    for sub in all_subs_query:
        subs_by_user[sub.user_profile_id].append(sub)

    # All users are assumed to be in the same realm.
    realm = users[0].realm

    # Partition the work: already subscribed / deactivated sub to
    # reactivate / brand-new subscription to create.
    already_subscribed: List[Tuple[UserProfile, Stream]] = []
    subs_to_activate: List[Tuple[Subscription, Stream]] = []
    new_subs: List[Tuple[UserProfile, int, Stream]] = []
    for user_profile in users:
        needs_new_sub: Set[int] = set(recipient_ids)
        for sub in subs_by_user[user_profile.id]:
            if sub.recipient_id in needs_new_sub:
                needs_new_sub.remove(sub.recipient_id)
                if sub.active:
                    already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
                else:
                    subs_to_activate.append((sub, stream_map[sub.recipient_id]))
                    # Mark the sub as active, without saving, so that
                    # pick_color will consider this to be an active
                    # subscription when picking colors
                    sub.active = True
        for recipient_id in needs_new_sub:
            new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))

    # Build the (unsaved) Subscription objects for brand-new subscriptions.
    subs_to_add: List[Tuple[Subscription, Stream]] = []
    for (user_profile, recipient_id, stream) in new_subs:
        if stream.name in color_map:
            color = color_map[stream.name]
        else:
            color = pick_color(user_profile, subs_by_user[user_profile.id])
        sub_to_add = Subscription(user_profile=user_profile, active=True,
                                  color=color, recipient_id=recipient_id)
        subs_by_user[user_profile.id].append(sub_to_add)
        subs_to_add.append((sub_to_add, stream))

    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(realm))
        Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
        sub_ids = [sub.id for (sub, stream) in subs_to_activate]
        Subscription.objects.filter(id__in=sub_ids).update(active=True)
        occupied_streams_after = list(get_occupied_streams(realm))

    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()

    all_subscription_logs: (List[RealmAuditLog]) = []
    for (sub, stream) in subs_to_add:
        all_subscription_logs.append(RealmAuditLog(realm=realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
                                                   event_time=event_time))
    for (sub, stream) in subs_to_activate:
        all_subscription_logs.append(RealmAuditLog(realm=realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)

    # Announce public streams that just became occupied.
    new_occupied_streams = [stream for stream in
                            set(occupied_streams_after) - set(occupied_streams_before)
                            if not stream.invite_only]
    if new_occupied_streams and not from_stream_creation:
        event: Dict[str, object] = dict(
            type="stream",
            op="occupy",
            streams=[stream.to_dict() for stream in new_occupied_streams],
        )
        send_event(realm, event, active_user_ids(realm.id))

    # Notify all existing users on streams that users have joined

    # First, get all users subscribed to the streams that we care about
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minize DB queries
    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)

    def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
        # Public streams in zephyr-mirror realms don't get subscriber lists.
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return []
        user_ids = all_subscribers_by_stream[stream.id]
        return user_ids

    sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list)
    new_streams: Set[Tuple[int, int]] = set()
    for (sub, stream) in subs_to_add + subs_to_activate:
        sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
        new_streams.add((sub.user_profile.id, stream.id))

    # We now send several types of events to notify browsers.  The
    # first batch is notifications to users on invite-only streams
    # that the stream exists.
    for stream in streams:
        if not stream.is_public():
            # Users newly added to invite-only streams
            # need a `create` notification.  The former, because
            # they need the stream to exist before
            # they get the "subscribe" notification, and the latter so
            # they can manage the new stream.
            # Realm admins already have all created private streams.
            realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
            new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
                             user.id not in realm_admin_ids]
            send_stream_creation_event(stream, new_users_ids)

    stream_ids = {stream.id for stream in streams}
    recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    # The second batch is events for the users themselves that they
    # were subscribed to the new streams.
    for user_profile in users:
        if len(sub_tuples_by_user[user_profile.id]) == 0:
            continue
        sub_pairs = sub_tuples_by_user[user_profile.id]
        notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
                                   recent_traffic)

    # The third batch is events for other users who are tracking the
    # subscribers lists of streams in their browser; everyone for
    # public streams and only existing subscribers for private streams.
    for stream in streams:
        if stream.is_in_zephyr_realm and not stream.invite_only:
            continue

        new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
        subscribed_user_ids = all_subscribers_by_stream[stream.id]

        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=new_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )

        if peer_user_ids:
            for new_user_id in new_user_ids:
                event = dict(type="subscription", op="peer_add",
                             stream_id=stream.id,
                             user_id=new_user_id)
                send_event(realm, event, peer_user_ids)

    return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
            [(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
            already_subscribed)
def get_available_notification_sounds() -> List[str]:
    """Return the base names of the .ogg notification sounds shipped in the
    static audio directory."""
    sounds_dir = static_path('audio/notification_sounds')

    available = []
    for file_name in os.listdir(sounds_dir):
        root, ext = os.path.splitext(file_name)

        if ext != '.ogg':
            continue

        if '.' in root:  # nocoverage
            # Exclude e.g. zulip.abcd1234.ogg (generated by production
            # hash-naming) to avoid spurious duplicates.
            continue

        available.append(root)

    return available
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
                                 no_log: bool=False) -> None:
    """Send the user a subscription/remove event for the given streams,
    and (unless no_log) record a log entry."""
    if not no_log:
        log_event({'type': 'subscription_removed',
                   'user': user_profile.email,
                   'names': [stream.name for stream in streams],
                   'realm': user_profile.realm.string_id})

    removed = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
    send_event(user_profile.realm,
               dict(type="subscription", op="remove", subscriptions=removed),
               [user_profile.id])
# Return shape of bulk_remove_subscriptions:
# (successfully-removed (user, stream) pairs, pairs that weren't subscribed).
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
                              streams: Iterable[Stream],
                              acting_client: Client,
                              acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
    """Unsubscribe each of `users` from each of `streams` in bulk:
    deactivates the Subscription rows, writes RealmAuditLog entries, sends
    the client events (subscription remove, mark-as-read queue job,
    peer_remove, stream vacate), and deactivates newly-vacant private
    streams.

    Returns (removed, not_subscribed), each a list of
    (user_profile, stream) pairs.
    """
    users = list(users)
    streams = list(streams)

    stream_dict = {stream.id: stream for stream in streams}

    existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)

    def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
        # (user, stream) pairs where the user had no subscription at all.
        stream_ids = {stream.id for stream in streams}

        not_subscribed: List[Tuple[UserProfile, Stream]] = []

        for user_profile in users:
            user_sub_stream_info = existing_subs_by_user[user_profile.id]

            subscribed_stream_ids = {
                stream.id
                for (sub, stream) in user_sub_stream_info
            }
            not_subscribed_stream_ids = stream_ids - subscribed_stream_ids

            for stream_id in not_subscribed_stream_ids:
                stream = stream_dict[stream_id]
                not_subscribed.append((user_profile, stream))

        return not_subscribed

    not_subscribed = get_non_subscribed_tups()

    subs_to_deactivate: List[Tuple[Subscription, Stream]] = []
    sub_ids_to_deactivate: List[int] = []

    # This loop just flattens out our data into big lists for
    # bulk operations.
    for tup_list in existing_subs_by_user.values():
        for (sub, stream) in tup_list:
            subs_to_deactivate.append((sub, stream))
            sub_ids_to_deactivate.append(sub.id)

    # All users are assumed to be in the same realm.
    our_realm = users[0].realm

    # TODO: XXX: This transaction really needs to be done at the serializeable
    # transaction isolation level.
    with transaction.atomic():
        occupied_streams_before = list(get_occupied_streams(our_realm))
        Subscription.objects.filter(
            id__in=sub_ids_to_deactivate,
        ).update(active=False)
        occupied_streams_after = list(get_occupied_streams(our_realm))

    # Log Subscription Activities in RealmAuditLog
    event_time = timezone_now()
    event_last_message_id = get_last_message_id()
    all_subscription_logs: (List[RealmAuditLog]) = []
    for (sub, stream) in subs_to_deactivate:
        all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
                                                   acting_user=acting_user,
                                                   modified_user=sub.user_profile,
                                                   modified_stream=stream,
                                                   event_last_message_id=event_last_message_id,
                                                   event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
                                                   event_time=event_time))
    # Now since we have all log objects generated we can do a bulk insert
    RealmAuditLog.objects.bulk_create(all_subscription_logs)

    # Re-index the removals per stream and per user for the notifications.
    altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list)
    streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
    for (sub, stream) in subs_to_deactivate:
        streams_by_user[sub.user_profile_id].append(stream)
        altered_user_dict[stream.id].append(sub.user_profile)

    for user_profile in users:
        if len(streams_by_user[user_profile.id]) == 0:
            continue
        notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])

        # Queue an asynchronous job marking the removed streams' messages
        # as read for this user.
        event = {'type': 'mark_stream_messages_as_read',
                 'client_id': acting_client.id,
                 'user_profile_id': user_profile.id,
                 'stream_ids': [stream.id for stream in streams]}
        queue_json_publish("deferred_work", event)

    all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)

    def send_peer_remove_event(stream: Stream) -> None:
        # Notify bystanders tracking this stream's subscriber list.
        if stream.is_in_zephyr_realm and not stream.invite_only:
            return

        altered_users = altered_user_dict[stream.id]
        altered_user_ids = [u.id for u in altered_users]

        subscribed_user_ids = all_subscribers_by_stream[stream.id]

        peer_user_ids = get_peer_user_ids_for_stream_change(
            stream=stream,
            altered_user_ids=altered_user_ids,
            subscribed_user_ids=subscribed_user_ids,
        )

        if peer_user_ids:
            for removed_user in altered_users:
                event = dict(type="subscription",
                             op="peer_remove",
                             stream_id=stream.id,
                             user_id=removed_user.id)
                send_event(our_realm, event, peer_user_ids)

    for stream in streams:
        send_peer_remove_event(stream=stream)

    new_vacant_streams = [stream for stream in
                          set(occupied_streams_before) - set(occupied_streams_after)]
    new_vacant_private_streams = [stream for stream in new_vacant_streams
                                  if stream.invite_only]
    new_vacant_public_streams = [stream for stream in new_vacant_streams
                                 if not stream.invite_only]
    if new_vacant_public_streams:
        event = dict(type="stream", op="vacate",
                     streams=[stream.to_dict()
                              for stream in new_vacant_public_streams])
        send_event(our_realm, event, active_user_ids(our_realm.id))
    if new_vacant_private_streams:
        # Deactivate any newly-vacant private streams
        for stream in new_vacant_private_streams:
            do_deactivate_stream(stream, acting_user=acting_user)

    return (
        [(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
        not_subscribed,
    )
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
                                     value: Any) -> None:
    """Record a subscription-property change in the event log.

    (`property` shadows the builtin, but renaming it would break callers
    passing it by keyword.)
    """
    log_event(dict(type='subscription_property',
                   property=property,
                   user=user_email,
                   stream_name=stream_name,
                   value=value))
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
                                    stream: Stream, property_name: str, value: Any,
                                    ) -> None:
    """Persist one subscription property and notify the user's clients.

    The is_muted property is stored as `is_muted` in the database but is
    still exposed as `in_home_view` (inverted) in the events API, since
    the events API hasn't been migrated to the new name yet.
    """
    if property_name == "in_home_view":
        database_property_name, database_value = "is_muted", not value
        event_property_name, event_value = property_name, value
    elif property_name == "is_muted":
        database_property_name, database_value = property_name, value
        event_property_name, event_value = "in_home_view", not value
    else:
        database_property_name, database_value = property_name, value
        event_property_name, event_value = property_name, value

    setattr(sub, database_property_name, database_value)
    sub.save(update_fields=[database_property_name])
    log_subscription_property_change(user_profile.email, stream.name,
                                     database_property_name, database_value)
    send_event(user_profile.realm,
               dict(type="subscription",
                    op="update",
                    email=user_profile.email,
                    property=event_property_name,
                    value=event_value,
                    stream_id=stream.id,
                    name=stream.name),
               [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
    """Set the user's password (hashing via set_password) and record the
    change in RealmAuditLog; with commit=False the profile is left unsaved."""
    user_profile.set_password(password)
    if commit:
        user_profile.save(update_fields=["password"])
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
                                 event_time=timezone_now())
def do_change_full_name(user_profile: UserProfile, full_name: str,
                        acting_user: Optional[UserProfile]) -> None:
    """Store the user's new full name, audit-log the old one, and notify
    the realm (plus bot owners, when the user is a bot)."""
    old_name = user_profile.full_name
    user_profile.full_name = full_name
    user_profile.save(update_fields=["full_name"])

    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
                                 event_time=timezone_now(), extra_data=old_name)

    payload = dict(user_id=user_profile.id,
                   full_name=user_profile.full_name)
    send_event(user_profile.realm,
               dict(type='realm_user', op='update', person=payload),
               active_user_ids(user_profile.realm_id))
    if not user_profile.is_bot:
        return
    send_event(user_profile.realm,
               dict(type='realm_bot', op='update', bot=payload),
               bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
                           acting_user: UserProfile) -> str:
    """Validate and apply a proposed full name, returning the name that was
    actually stored (it may differ from the input, because validation
    strips whitespace).  The caller is responsible for permission checks."""
    validated_name = check_full_name(full_name_raw)
    do_change_full_name(user_profile, validated_name, acting_user)
    return validated_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
                               acting_user: UserProfile) -> None:
    """Validate and apply a new name for a bot, ensuring the name is free
    within the realm.  A no-op rename is silently ignored (the web app will
    patch full_name even when the form field wasn't modified)."""
    new_full_name = check_full_name(full_name_raw)

    if new_full_name != user_profile.full_name:
        check_bot_name_available(
            realm_id=user_profile.realm_id,
            full_name=new_full_name,
        )
        do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
                        acting_user: UserProfile) -> None:
    """Transfer ownership of the bot `user_profile` to `bot_owner`,
    audit-logging the change and sending the realm_bot delete/add/update
    events needed to keep each affected client's bot list correct."""
    previous_owner = user_profile.bot_owner
    user_profile.bot_owner = bot_owner
    user_profile.save()  # Can't use update_fields because of how the foreign key works.
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
                                 event_time=timezone_now())

    update_users = bot_owner_user_ids(user_profile)

    # For admins, update event is sent instead of delete/add
    # event. bot_data of admin contains all the
    # bots and none of them should be removed/(added again).

    # Delete the bot from previous owner's bot data.
    if previous_owner and not previous_owner.is_realm_admin:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op="delete",
                        bot=dict(
                            user_id=user_profile.id,
                        )),
                   {previous_owner.id})
        # Do not send update event for previous bot owner.
        update_users = update_users - {previous_owner.id}

    # Notify the new owner that the bot has been added.
    if not bot_owner.is_realm_admin:
        add_event = created_bot_event(user_profile)
        send_event(user_profile.realm, add_event, {bot_owner.id})
        # Do not send update event for bot_owner.
        update_users = update_users - {bot_owner.id}

    # Everyone else who can see the bot just gets the new owner id.
    send_event(user_profile.realm,
               dict(type='realm_bot',
                    op='update',
                    bot=dict(user_id=user_profile.id,
                             owner_id=user_profile.bot_owner.id,
                             )),
               update_users)

    # Since `bot_owner_id` is included in the user profile dict we need
    # to update the users dict with the new bot owner id
    event: Dict[str, Any] = dict(
        type="realm_user",
        op="update",
        person=dict(
            user_id=user_profile.id,
            bot_owner_id=user_profile.bot_owner.id,
        ),
    )
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
    """Record that the user has agreed to the given Terms of Service
    version, with a RealmAuditLog entry."""
    user_profile.tos_version = tos_version
    user_profile.save(update_fields=["tos_version"])
    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
                                 event_time=timezone_now())
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
    """Rotate the user's API key and return the new key.

    Also flushes the cache entry for the old key, audit-logs the change,
    notifies bot owners when the user is a bot, and queues a job clearing
    the user's push device tokens.
    """
    old_api_key = user_profile.api_key
    user_profile.api_key = generate_api_key()
    user_profile.save(update_fields=["api_key"])

    # We need to explicitly delete the old API key from our caches,
    # because the on-save handler for flushing the UserProfile object
    # in zerver/lib/cache.py only has access to the new API key.
    cache_delete(user_profile_by_api_key_cache_key(old_api_key))

    RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
                                 modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_API_KEY_CHANGED,
                                 event_time=timezone_now())

    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(user_id=user_profile.id,
                                 api_key=user_profile.api_key,
                                 )),
                   bot_owner_user_ids(user_profile))

    queue_json_publish("deferred_work",
                       {'type': 'clear_push_device_tokens',
                        'user_profile_id': user_profile.id})

    return user_profile.api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
    """Broadcast the user's current avatar URLs: a realm_bot event to bot
    owners when the user is a bot, plus a realm_user event to the realm."""
    if user_profile.is_bot:
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(user_id=user_profile.id,
                                 avatar_url=avatar_url(user_profile),
                                 )),
                   bot_owner_user_ids(user_profile))

    person = dict(
        avatar_source=user_profile.avatar_source,
        avatar_url=avatar_url(user_profile),
        avatar_url_medium=avatar_url(user_profile, medium=True),
        avatar_version=user_profile.avatar_version,
        # Even clients using client_gravatar don't need the email,
        # since we're sending the URL anyway.
        user_id=user_profile.id,
    )
    send_event(user_profile.realm,
               dict(type='realm_user',
                    op='update',
                    person=person),
               active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str,
                            skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None:
    """Switch the user's avatar source and bump the avatar version,
    audit-logging the change and (unless skip_notify) broadcasting the
    new avatar URLs."""
    user_profile.avatar_source = avatar_source
    user_profile.avatar_version += 1
    user_profile.save(update_fields=["avatar_source", "avatar_version"])

    RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
                                 event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
                                 extra_data={'avatar_source': avatar_source},
                                 event_time=timezone_now(), acting_user=acting_user)

    if skip_notify:
        return
    notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
    """Reset the user's avatar to gravatar (which also audit-logs and
    notifies clients, via do_change_avatar_fields), then delete the
    previously uploaded avatar image."""
    do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
    delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
    """Change the realm icon source, bump its version, optionally log the
    change, and notify all active users of the new icon URL."""
    realm.icon_source = icon_source
    realm.icon_version += 1
    realm.save(update_fields=["icon_source", "icon_version"])

    if log:
        log_event({'type': 'realm_change_icon',
                   'realm': realm.string_id,
                   'icon_source': icon_source})

    payload = dict(icon_source=realm.icon_source,
                   icon_url=realm_icon_url(realm))
    send_event(realm,
               dict(type='realm',
                    op='update_dict',
                    property="icon",
                    data=payload),
               active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None:
    """Change the realm's day or night logo source (selected by `night`),
    bump the corresponding version, audit-log, and notify the realm."""
    if night:
        realm.night_logo_source = logo_source
        realm.night_logo_version += 1
        realm.save(update_fields=["night_logo_source", "night_logo_version"])
    else:
        realm.logo_source = logo_source
        realm.logo_version += 1
        realm.save(update_fields=["logo_source", "logo_version"])

    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
                                 realm=realm, event_time=timezone_now(),
                                 acting_user=acting_user)

    send_event(realm,
               dict(type='realm',
                    op='update_dict',
                    property="night_logo" if night else "logo",
                    data=get_realm_logo_data(realm, night)),
               active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
    """Change the realm's plan type and apply the limits that come with it
    (invite quota, message-history visibility, upload quota), audit-logging
    the change and notifying all active users.

    Raises AssertionError for an unknown plan_type.
    """
    old_value = realm.plan_type
    realm.plan_type = plan_type
    realm.save(update_fields=['plan_type'])
    RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
                                 realm=realm, event_time=timezone_now(),
                                 extra_data={'old_value': old_value, 'new_value': plan_type})

    # STANDARD and STANDARD_FREE share identical limits; previously these
    # were two duplicated branches.
    if plan_type in (Realm.STANDARD, Realm.STANDARD_FREE):
        realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
        realm.message_visibility_limit = None
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
    elif plan_type == Realm.SELF_HOSTED:
        realm.max_invites = None  # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
        realm.message_visibility_limit = None
        realm.upload_quota_gb = None
    elif plan_type == Realm.LIMITED:
        realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
        realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
        realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
    else:
        raise AssertionError("Invalid plan type")

    update_first_visible_message_id(realm)

    # NOTE(review): '_max_invites' here suggests max_invites is a property
    # backed by a '_max_invites' field — confirm against the Realm model.
    realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
    event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
             'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
    send_event(realm, event, active_user_ids(realm.id))
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
                                     log: bool=True) -> None:
    """Set (or clear, with stream=None) the user's default sending stream,
    optionally logging, and notify bot owners when the user is a bot."""
    user_profile.default_sending_stream = stream
    user_profile.save(update_fields=['default_sending_stream'])

    if log:
        log_event({'type': 'user_change_default_sending_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})

    if user_profile.is_bot:
        stream_name = stream.name if stream else None
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(user_id=user_profile.id,
                                 default_sending_stream=stream_name,
                                 )),
                   bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
                                             stream: Optional[Stream],
                                             log: bool=True) -> None:
    """Set (or clear, with stream=None) the user's default events-register
    stream, optionally logging, and notify bot owners for bots."""
    user_profile.default_events_register_stream = stream
    user_profile.save(update_fields=['default_events_register_stream'])

    if log:
        log_event({'type': 'user_change_default_events_register_stream',
                   'user': user_profile.email,
                   'stream': str(stream)})

    if user_profile.is_bot:
        stream_name = stream.name if stream else None
        send_event(user_profile.realm,
                   dict(type='realm_bot',
                        op='update',
                        bot=dict(user_id=user_profile.id,
                                 default_events_register_stream=stream_name,
                                 )),
                   bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
                                         log: bool=True) -> None:
    """Set the user's default_all_public_streams flag, optionally logging,
    and notify bot owners when the user is a bot."""
    user_profile.default_all_public_streams = value
    user_profile.save(update_fields=['default_all_public_streams'])

    if log:
        log_event({'type': 'user_change_default_all_public_streams',
                   'user': user_profile.email,
                   'value': str(value)})

    if user_profile.is_bot:
        bot_payload = dict(user_id=user_profile.id,
                           default_all_public_streams=user_profile.default_all_public_streams,
                           )
        send_event(user_profile.realm,
                   dict(type='realm_bot', op='update', bot=bot_payload),
                   bot_owner_user_ids(user_profile))
def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None:
    """Change the user's role, recording old/new role and the realm's
    per-role counts in RealmAuditLog, and notify the realm."""
    previous_role = user_profile.role
    user_profile.role = value
    user_profile.save(update_fields=["role"])

    audit_extra_data = ujson.dumps({
        RealmAuditLog.OLD_VALUE: previous_role,
        RealmAuditLog.NEW_VALUE: value,
        RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
    })
    RealmAuditLog.objects.create(
        realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
        event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
        extra_data=audit_extra_data)

    send_event(user_profile.realm,
               dict(type="realm_user", op="update",
                    person=dict(user_id=user_profile.id, role=user_profile.role)),
               active_user_ids(user_profile.realm_id))
def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None:
    # Persist the new is_api_super_user flag.  Unlike most do_change_*
    # helpers in this file, this sends no client event and writes no log entry.
    user_profile.is_api_super_user = value
    user_profile.save(update_fields=["is_api_super_user"])
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
                                 history_public_to_subscribers: Optional[bool]=None) -> None:
    """Change a stream's privacy setting and notify users who can see it.

    history_public_to_subscribers is normalized against the realm's
    defaults before being stored alongside invite_only.
    """
    history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
        stream.realm,
        invite_only,
        history_public_to_subscribers,
    )
    stream.invite_only = invite_only
    stream.history_public_to_subscribers = history_public_to_subscribers
    stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])

    event = {
        "op": "update",
        "type": "stream",
        "property": "invite_only",
        "value": invite_only,
        "history_public_to_subscribers": history_public_to_subscribers,
        "stream_id": stream.id,
        "name": stream.name,
    }
    send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
    # Persist the new is_web_public flag.  Note that, unlike the other
    # do_change_stream_* helpers here, this sends no event to clients.
    stream.is_web_public = is_web_public
    stream.save(update_fields=['is_web_public'])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
    """Change who may post to a stream and notify users who can see it."""
    stream.stream_post_policy = stream_post_policy
    stream.save(update_fields=['stream_post_policy'])

    def announce(property: str, value: Any) -> None:
        # One stream/update event per property, keyed by stream_id.
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            stream_id=stream.id,
            name=stream.name,
        )
        send_event(stream.realm, event, can_access_stream_user_ids(stream))

    announce("stream_post_policy", stream_post_policy)

    # Backwards-compatibility code: We removed the is_announcement_only
    # property in early 2020, but we send a duplicate event for legacy
    # mobile clients that might want the data.
    announce("is_announcement_only",
             stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS)
def do_rename_stream(stream: Stream,
                     new_name: str,
                     user_profile: UserProfile,
                     log: bool=True) -> Dict[str, str]:
    """Rename a stream, fix up caches, notify clients, and post a notice.

    Returns a dict containing the stream's new email forwarding address,
    since encode_email_address() depends on the stream name.
    """
    old_name = stream.name
    stream.name = new_name
    stream.save(update_fields=["name"])

    if log:
        log_event({'type': 'stream_name_change',
                   'realm': stream.realm.string_id,
                   'new_name': new_name})

    recipient_id = stream.recipient_id
    messages = Message.objects.filter(recipient_id=recipient_id).only("id")

    # Update the display recipient and stream, which are easy single
    # items to set.
    old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
    new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
    if old_cache_key != new_cache_key:
        cache_delete(old_cache_key)
        cache_set(new_cache_key, stream)
    cache_set(display_recipient_cache_key(recipient_id), stream.name)

    # Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the out of
    # date field in all cases.
    cache_delete_many(
        to_dict_cache_key_id(message.id) for message in messages)
    new_email = encode_email_address(stream, show_sender=True)

    # We will tell our users to essentially
    # update stream.name = new_name where name = old_name
    # and update stream.email = new_email where name = old_name.
    # We could optimize this by trying to send one message, but the
    # client code really wants one property update at a time, and
    # updating stream names is a pretty infrequent operation.
    # More importantly, we want to key these updates by id, not name,
    # since id is the immutable primary key, and obviously name is not.
    data_updates = [
        ['email_address', new_email],
        ['name', new_name],
    ]
    for property, value in data_updates:
        event = dict(
            op="update",
            type="stream",
            property=property,
            value=value,
            stream_id=stream.id,
            name=old_name,
        )
        send_event(stream.realm, event, can_access_stream_user_ids(stream))

    # Post a notice about the rename in the stream itself, in the realm's
    # default language.
    sender = get_system_bot(settings.NOTIFICATION_BOT)
    with override_language(stream.realm.default_language):
        internal_send_stream_message(
            stream.realm,
            sender,
            stream,
            Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
            _('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format(
                user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
                old_stream_name=f"**{old_name}**",
                new_stream_name=f"**{new_name}**",
            ),
        )

    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address to display the correctly-escaped new name.
    return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
    """Update a stream's description (raw and rendered) and notify clients."""
    stream.description = new_description
    stream.rendered_description = render_stream_description(new_description)
    stream.save(update_fields=['description', 'rendered_description'])

    event = {
        'type': 'stream',
        'op': 'update',
        'property': 'description',
        'name': stream.name,
        'stream_id': stream.id,
        'value': new_description,
        'rendered_description': stream.rendered_description,
    }
    send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None:
    """Set a stream's message_retention_days (may be None) and notify clients."""
    stream.message_retention_days = message_retention_days
    stream.save(update_fields=['message_retention_days'])

    event = {
        "op": "update",
        "type": "stream",
        "property": "message_retention_days",
        "value": message_retention_days,
        "stream_id": stream.id,
        "name": stream.name,
    }
    send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
                    emails_restricted_to_domains: Optional[bool]=None) -> Realm:
    """Create a new realm with its two initial streams and announce it.

    Side effects, in order: bootstraps the internal realm on a fresh
    server, creates the Realm row, creates the default notifications
    stream and the private signup-notifications stream, switches the
    plan type to LIMITED when billing is enabled, writes a log entry,
    and posts a "Signups enabled" notice to the admin realm.

    Raises AssertionError if a realm with this string_id already exists.
    """
    if Realm.objects.filter(string_id=string_id).exists():
        raise AssertionError(f"Realm {string_id} already exists!")
    if not server_initialized():
        logging.info("Server not yet initialized. Creating the internal realm first.")
        create_internal_realm()

    # Only pass emails_restricted_to_domains through when the caller gave
    # a value, so the model default applies otherwise.
    kwargs: Dict[str, Any] = {}
    if emails_restricted_to_domains is not None:
        kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
    realm = Realm(string_id=string_id, name=name, **kwargs)
    realm.save()

    # Create stream once Realm object has been saved
    notifications_stream = ensure_stream(
        realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
        stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None)
    realm.notifications_stream = notifications_stream

    # With the current initial streams situation, the only public
    # stream is the notifications_stream.
    DefaultStream.objects.create(stream=notifications_stream, realm=realm)

    signup_notifications_stream = ensure_stream(
        realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
        stream_description="A private stream for core team members.", acting_user=None)
    realm.signup_notifications_stream = signup_notifications_stream

    realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])

    if settings.BILLING_ENABLED:
        do_change_plan_type(realm, Realm.LIMITED)

    # Log the event
    log_event({"type": "realm_created",
               "string_id": string_id,
               "emails_restricted_to_domains": emails_restricted_to_domains})

    sender = get_system_bot(settings.NOTIFICATION_BOT)
    admin_realm = sender.realm
    # Send a notification to the admin realm
    with override_language(admin_realm.default_language):
        signup_message = _("Signups enabled")
    try:
        signups_stream = get_signups_stream(admin_realm)
        topic = realm.display_subdomain

        internal_send_stream_message(
            admin_realm,
            sender,
            signups_stream,
            topic,
            signup_message,
        )
    except Stream.DoesNotExist:  # nocoverage
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
        pass
    return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
                                    value: Union[bool, int, str], log: bool=True) -> None:
    """Update one of the user's realm-wide notification settings.

    `name` must be a key of UserProfile.notification_setting_types; `value`
    is type-checked against the declared setting type before being stored.
    """
    expected_type = UserProfile.notification_setting_types[name]
    assert isinstance(value, expected_type), (
        f'Cannot update {name}: {value} is not an instance of {expected_type}')

    setattr(user_profile, name, value)

    # Disabling digest emails should clear a user's email queue
    if name == 'enable_digest_emails' and not value:
        clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)

    user_profile.save(update_fields=[name])

    event = dict(type='update_global_notifications',
                 user=user_profile.email,
                 notification_name=name,
                 setting=value)
    if log:
        log_event(event)
    send_event(user_profile.realm, event, [user_profile.id])
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
    # Persist the user's enter_sends preference; no client event is sent here.
    user_profile.enter_sends = enter_sends
    user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
                                setting_name: str,
                                setting_value: Union[bool, str, int]) -> None:
    """Update one of the user's display settings and notify their clients.

    `setting_name` must be a key of UserProfile.property_types; the value
    is type-checked against the declared type before being stored.
    """
    expected_type = UserProfile.property_types[setting_name]
    assert isinstance(setting_value, expected_type)

    setattr(user_profile, setting_name, setting_value)
    user_profile.save(update_fields=[setting_name])

    event = dict(type='update_display_settings',
                 user=user_profile.email,
                 setting_name=setting_name,
                 setting=setting_value)
    if setting_name == "default_language":
        assert isinstance(setting_value, str)
        event['language_name'] = get_language_name(setting_value)

    send_event(user_profile.realm, event, [user_profile.id])

    # Updates to the timezone display setting are sent to all users
    if setting_name == "timezone":
        payload = dict(email=user_profile.email,
                       user_id=user_profile.id,
                       timezone=user_profile.timezone)
        send_event(user_profile.realm,
                   dict(type='realm_user', op='update', person=payload),
                   active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
                                 realm: Realm) -> List[DefaultStreamGroup]:
    """Resolve group names to DefaultStreamGroup rows for this realm.

    Raises JsonableError for any name with no matching group.
    """
    groups = []
    for group_name in default_stream_group_names:
        try:
            group = DefaultStreamGroup.objects.get(name=group_name, realm=realm)
        except DefaultStreamGroup.DoesNotExist:
            raise JsonableError(_('Invalid default stream group {}').format(group_name))
        groups.append(group)
    return groups
def notify_default_streams(realm: Realm) -> None:
    """Broadcast the realm's current default-stream list to non-guest users."""
    default_streams = streams_to_dicts_sorted(get_default_streams_for_realm(realm.id))
    send_event(realm,
               dict(type="default_streams", default_streams=default_streams),
               active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
    """Broadcast the realm's default stream groups to non-guest users."""
    groups = default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm))
    send_event(realm,
               dict(type="default_stream_groups", default_stream_groups=groups),
               active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
    """Make a stream a realm default (idempotent) and notify clients."""
    already_default = DefaultStream.objects.filter(
        realm_id=stream.realm_id, stream_id=stream.id,
    ).exists()
    if not already_default:
        DefaultStream.objects.create(realm_id=stream.realm_id, stream_id=stream.id)
    notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
    """Remove a stream from the realm defaults and notify clients."""
    DefaultStream.objects.filter(
        realm_id=stream.realm_id, stream_id=stream.id,
    ).delete()
    notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
                                   description: str, streams: List[Stream]) -> None:
    """Create a new default stream group containing `streams` and notify.

    Raises JsonableError if any stream is already a realm-wide default,
    if the name fails validation, or if the group already exists.
    """
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'{stream_name}' is a default stream and cannot be added to '{group_name}'",
            ).format(stream_name=stream.name, group_name=group_name))

    check_default_stream_group_name(group_name)
    # get_or_create so that a pre-existing group is detected atomically.
    (group, created) = DefaultStreamGroup.objects.get_or_create(
        name=group_name, realm=realm, description=description)
    if not created:
        raise JsonableError(_(
            "Default stream group '{group_name}' already exists",
        ).format(group_name=group_name))

    group.streams.set(streams)
    notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                           streams: List[Stream]) -> None:
    """Add streams to an existing default stream group and notify clients.

    Raises JsonableError if a stream is a realm-wide default or is
    already a member of the group.
    """
    default_streams = get_default_streams_for_realm(realm.id)
    for stream in streams:
        if stream in default_streams:
            raise JsonableError(_(
                "'{stream_name}' is a default stream and cannot be added to '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name))
        # Note: group.streams.all() is re-queried on every iteration, so a
        # stream added earlier in this loop is seen as "already present".
        if stream in group.streams.all():
            raise JsonableError(_(
                "Stream '{stream_name}' is already present in default stream group '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name))
        group.streams.add(stream)

    group.save()
    notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
                                                streams: List[Stream]) -> None:
    """Remove streams from a default stream group and notify clients.

    Raises JsonableError if any stream is not currently in the group.
    """
    for stream in streams:
        # group.streams.all() is re-queried on each iteration, so removals
        # earlier in the loop are reflected in later membership checks.
        if stream not in group.streams.all():
            raise JsonableError(_(
                "Stream '{stream_name}' is not present in default stream group '{group_name}'",
            ).format(stream_name=stream.name, group_name=group.name))
        group.streams.remove(stream)

    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
                                        new_group_name: str) -> None:
    """Rename a default stream group and notify clients.

    Raises JsonableError when the name is unchanged or already taken.
    The order of the two checks matters: renaming a group to its own
    current name reports "already named", not "already exists".
    """
    if group.name == new_group_name:
        raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name))

    if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
        raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))

    group.name = new_group_name
    group.save()
    notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
                                               new_description: str) -> None:
    # Update the group's description and broadcast the new group list.
    group.description = new_description
    group.save()
    notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
    # Delete the group entirely and broadcast the updated group list.
    group.delete()
    notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
    """Return the realm's default streams as Stream objects."""
    rows = DefaultStream.objects.select_related().filter(realm_id=realm_id)
    return [row.stream for row in rows]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
    # Right now default streams are realm-wide. This wrapper gives us flexibility
    # to some day further customize how we set up default streams for new users.
    return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
    """Serialize streams to dicts, sorted by stream name."""
    dicts = [stream.to_dict() for stream in streams]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
    """Serialize default stream groups to dicts, sorted by group name."""
    dicts = [group.to_dict() for group in groups]
    dicts.sort(key=lambda elt: elt["name"])
    return dicts
def do_update_user_activity_interval(user_profile: UserProfile,
                                     log_time: datetime.datetime) -> None:
    """Record user activity at log_time, merging into the latest interval.

    This code isn't perfect, because with various races we might end up
    creating two overlapping intervals, but that shouldn't happen often,
    and can be corrected for in post-processing.
    """
    effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH

    try:
        last = UserActivityInterval.objects.filter(
            user_profile=user_profile).order_by("-end")[0]
    except IndexError:
        last = None

    if last is not None:
        # There are two ways our intervals could overlap: the start of the
        # new interval could fall inside the old one, or its end could.
        # In either case, stretch the old interval to cover the new one.
        starts_inside = last.start <= log_time <= last.end
        ends_inside = last.start <= effective_end <= last.end
        if starts_inside or ends_inside:
            last.end = max(last.end, effective_end)
            last.start = min(last.start, log_time)
            last.save(update_fields=["start", "end"])
            return

    # No overlap (or no prior interval): open a fresh one.
    UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
                                        end=effective_end)
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
                            client_id: int,
                            query: str,
                            count: int,
                            log_time: datetime.datetime) -> None:
    """Upsert the UserActivity row for (user, client, query).

    Creates the row on first sight; otherwise bumps the counter and the
    last-visit timestamp.
    """
    (activity, created) = UserActivity.objects.get_or_create(
        user_profile_id=user_profile_id,
        client_id=client_id,
        query=query,
        defaults={'last_visit': log_time, 'count': count})

    if not created:
        activity.count += count
        activity.last_visit = log_time
        activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
    """Broadcast one client's presence update to everyone in the realm."""
    presence_dict = presence.to_dict()
    per_client_presence = {presence_dict['client']: presence_dict}
    event = dict(
        type="presence",
        email=user_profile.email,
        user_id=user_profile.id,
        server_timestamp=time.time(),
        presence=per_client_presence,
    )
    send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    """Collapse aliases so desktop-app traffic counts as web traffic.

    The web app reports a client as 'website'; the desktop app reports
    itself as ZulipDesktop due to its custom user agent.  We want both
    to count as web users, so ZulipDesktop is aliased to website.
    """
    if client.name == 'ZulipDesktop':
        return get_client('website')
    return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
                            client: Client,
                            log_time: datetime.datetime,
                            status: int) -> None:
    """Upsert the (user, client) UserPresence row and broadcast when useful.

    `status` is one of the UserPresence status constants (e.g. ACTIVE, IDLE).
    """
    client = consolidate_client(client)
    defaults = dict(
        timestamp=log_time,
        status=status,
        realm_id=user_profile.realm_id,
    )
    (presence, created) = UserPresence.objects.get_or_create(
        user_profile = user_profile,
        client = client,
        defaults = defaults,
    )

    # "Stale" means the existing row was last written more than 70s ago.
    stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
    was_idle = presence.status == UserPresence.IDLE
    became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)

    # If an object was created, it has already been saved.
    #
    # We suppress changes from ACTIVE to IDLE before stale_status is reached;
    # this protects us from the user having two clients open: one active, the
    # other idle. Without this check, we would constantly toggle their status
    # between the two states.
    #
    # NOTE(review): `and` binds tighter than `or`, so this condition parses as
    # (not created and stale_status) or was_idle or (status == presence.status);
    # the `not created` guard only applies to the stale_status term — confirm
    # that is the intended grouping.
    if not created and stale_status or was_idle or status == presence.status:
        # The following block attempts to only update the "status"
        # field in the event that it actually changed. This is
        # important to avoid flushing the UserPresence cache when the
        # data it would return to a client hasn't actually changed
        # (see the UserPresence post_save hook for details).
        presence.timestamp = log_time
        update_fields = ["timestamp"]
        if presence.status != status:
            presence.status = status
            update_fields.append("status")
        presence.save(update_fields=update_fields)

    if not user_profile.realm.presence_disabled and (created or became_online):
        # Push event to all users in the realm so they see the new user
        # appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # timestamp update, because we rely on the browser to ping us every 50
        # seconds for realm-wide status updates, and those updates should have
        # recent timestamps, which means the browser won't think active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
        send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
    """Queue an asynchronous update of the user's activity interval."""
    payload = {
        'user_profile_id': user_profile.id,
        'time': datetime_to_timestamp(log_time),
    }
    queue_json_publish("user_activity_interval", payload)
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
                         status: int, new_user_input: bool) -> None:
    """Queue an async presence update; also record activity on user input."""
    payload = {
        'user_profile_id': user_profile.id,
        'status': status,
        'time': datetime_to_timestamp(log_time),
        'client': client.name,
    }
    queue_json_publish("user_presence", payload)

    if new_user_input:
        update_user_activity_interval(user_profile, log_time)
def do_update_user_status(user_profile: UserProfile,
                          away: Optional[bool],
                          status_text: Optional[str],
                          client_id: int) -> None:
    """Persist a user's away/status-text state and notify the realm.

    `away` and `status_text` fields are only included in the client event
    when they are not None; a falsy `away` still stores UserStatus.NORMAL.
    """
    status = UserStatus.AWAY if away else UserStatus.NORMAL
    realm = user_profile.realm
    update_user_status(
        user_profile_id=user_profile.id,
        status=status,
        status_text=status_text,
        client_id=client_id,
    )

    event: Dict[str, Any] = dict(
        type='user_status',
        user_id=user_profile.id,
    )
    if away is not None:
        event['away'] = away
    if status_text is not None:
        event['status_text'] = status_text

    send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
    """Mark every unread message as read for this user ("bankruptcy").

    Returns the number of UserMessage rows updated.
    """
    log_statsd_event('bankruptcy')

    # First, we clear mobile push notifications. This is safer in the
    # event that the below logic times out and we're killed.
    all_push_message_ids = UserMessage.objects.filter(
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list("message_id", flat=True)[0:10000]
    do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)

    unread = UserMessage.objects.filter(
        user_profile=user_profile,
    ).extra(
        where=[UserMessage.where_unread()],
    )
    count = unread.update(
        flags=F('flags').bitor(UserMessage.flags.read),
    )

    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=[],  # we don't send messages, since the client reloads anyway
        all=True,
    )
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])

    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                              None, event_time, increment=count)
    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                              None, event_time, increment=min(1, count))
    return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
                                    client: Client,
                                    stream: Stream,
                                    topic_name: Optional[str]=None) -> int:
    """Mark the user's unread messages in a stream (or one topic) as read.

    Returns the number of UserMessage rows updated.
    """
    log_statsd_event('mark_stream_as_read')

    query = UserMessage.objects.filter(
        user_profile=user_profile,
    ).filter(message__recipient=stream.recipient)
    if topic_name:
        query = filter_by_topic_name_via_message(
            query=query,
            topic_name=topic_name,
        )
    query = query.extra(
        where=[UserMessage.where_unread()],
    )

    # Capture the affected ids before the UPDATE flips the unread flag.
    message_ids = list(query.values_list('message__id', flat=True))
    count = query.update(
        flags=F('flags').bitor(UserMessage.flags.read),
    )

    event = dict(
        type='update_message_flags',
        operation='add',
        flag='read',
        messages=message_ids,
        all=False,
    )
    event_time = timezone_now()
    send_event(user_profile.realm, event, [user_profile.id])
    do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)

    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                              None, event_time, increment=count)
    do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                              None, event_time, increment=min(1, count))
    return count
def do_update_mobile_push_notification(message: Message,
                                       prior_mention_user_ids: Set[int],
                                       stream_push_user_ids: Set[int]) -> None:
    """Withdraw push notifications made stale by a message edit.

    Called during the message edit code path to remove mobile push
    notifications for users who are no longer mentioned following the
    edit (see #15428).  A perfect implementation would also support
    updating the message in a sent notification if a message was edited
    to mention a group rather than a user (or vice versa), though it is
    likely not worth the effort to do such a change.
    """
    if not message.is_stream_message():
        return

    no_longer_notified = (
        prior_mention_user_ids
        - message.mentions_user_ids
        - stream_push_user_ids
    )
    do_clear_mobile_push_notifications_for_ids(list(no_longer_notified), [message.id])
def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int],
                                               message_ids: List[int]) -> None:
    """Queue removal of active mobile push notifications for these messages.

    Clearing notifications for several users at once is supported only in
    the message-edit use case, where there is a single message_id.
    """
    if not message_ids:
        return

    assert len(user_profile_ids) == 1 or len(message_ids) == 1

    rows = UserMessage.objects.filter(
        message_id__in=message_ids,
        user_profile_id__in=user_profile_ids,
    ).extra(
        where=[UserMessage.where_active_push_notification()],
    ).values_list('user_profile_id', 'message_id')

    messages_by_user: Dict[int, List[int]] = defaultdict(list)
    for user_id, message_id in rows:
        messages_by_user[user_id].append(message_id)

    for user_profile_id, event_message_ids in messages_by_user.items():
        queue_json_publish("missedmessage_mobile_notifications", {
            "type": "remove",
            "user_profile_id": user_profile_id,
            "message_ids": event_message_ids,
        })
def do_update_message_flags(user_profile: UserProfile,
                            client: Client,
                            operation: str,
                            flag: str,
                            messages: List[int]) -> int:
    """Add or remove a personal message flag (e.g. 'read', 'starred').

    `operation` is 'add' or 'remove'.  Returns the number of UserMessage
    rows updated.  Raises JsonableError for unknown or non-editable flags
    and for invalid message sets.
    """
    valid_flags = [item for item in UserMessage.flags
                   if item not in UserMessage.NON_API_FLAGS]
    if flag not in valid_flags:
        raise JsonableError(_("Invalid flag: '{}'").format(flag))
    if flag in UserMessage.NON_EDITABLE_FLAGS:
        raise JsonableError(_("Flag not editable: '{}'").format(flag))
    flagattr = getattr(UserMessage.flags, flag)

    msgs = UserMessage.objects.filter(user_profile=user_profile,
                                      message__id__in=messages)
    # This next block allows you to star any message, even those you
    # didn't receive (e.g. because you're looking at a public stream
    # you're not subscribed to, etc.). The problem is that starring
    # is a flag boolean on UserMessage, and UserMessage rows are
    # normally created only when you receive a message to support
    # searching your personal history. So we need to create one. We
    # add UserMessage.flags.historical, so that features that need
    # "messages you actually received" can exclude these UserMessages.
    if msgs.count() == 0:
        if not len(messages) == 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))
        # Validate that the user could have read the relevant message
        message = access_message(user_profile, messages[0])[0]
        # OK, this is a message that you legitimately have access
        # to via narrowing to the stream it is on, even though you
        # didn't actually receive it. So we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(user_profile=user_profile,
                                   message=message,
                                   flags=UserMessage.flags.historical | UserMessage.flags.read)

    # Single bulk UPDATE using bitwise F-expressions on the flags column.
    if operation == 'add':
        count = msgs.update(flags=F('flags').bitor(flagattr))
    elif operation == 'remove':
        count = msgs.update(flags=F('flags').bitand(~flagattr))
    else:
        raise AssertionError("Invalid message flags operation")

    event = {'type': 'update_message_flags',
             'operation': operation,
             'flag': flag,
             'messages': messages,
             'all': False}
    send_event(user_profile.realm, event, [user_profile.id])

    if flag == "read" and operation == "add":
        # Marking as read also clears pending mobile push notifications
        # and feeds the messages_read analytics counters.
        event_time = timezone_now()
        do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
        do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
                                  None, event_time, increment=count)
        do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
                                  None, event_time, increment=min(1, count))
    return count
class MessageUpdateUserInfoResult(TypedDict):
    """Return type of get_user_info_for_message_updates."""
    # All user ids with a non-historical UserMessage row for the message.
    message_user_ids: Set[int]
    # Subset of message_user_ids whose row has a mention flag set.
    mention_user_ids: Set[int]
def notify_topic_moved_streams(user_profile: UserProfile,
                               old_stream: Stream, old_topic: str,
                               new_stream: Stream, new_topic: Optional[str],
                               send_notification_to_old_thread: bool,
                               send_notification_to_new_thread: bool) -> None:
    """Post tombstone notices after a topic is moved between streams.

    Since moving content between streams is highly disruptive, it's
    worth adding a couple tombstone messages showing what happened.
    A None new_topic means the topic name is unchanged.
    """
    sender = get_system_bot(settings.NOTIFICATION_BOT)

    if new_topic is None:
        new_topic = old_topic

    user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
    old_topic_link = f"#**{old_stream.name}>{old_topic}**"
    new_topic_link = f"#**{new_stream.name}>{new_topic}**"

    if send_notification_to_new_thread:
        # Breadcrumb in the destination, in that realm's default language.
        with override_language(new_stream.realm.default_language):
            internal_send_stream_message(
                new_stream.realm, sender, new_stream, new_topic,
                _("This topic was moved here from {old_location} by {user}").format(
                    old_location=old_topic_link, user=user_mention,
                ),
            )

    if send_notification_to_old_thread:
        # Breadcrumb at the origin pointing to the new location.
        with override_language(old_stream.realm.default_language):
            internal_send_stream_message(
                old_stream.realm, sender, old_stream, old_topic,
                _("This topic was moved by {user} to {new_location}").format(
                    user=user_mention, new_location=new_topic_link,
                ),
            )
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
    """Collect recipient and mention user ids needed for a message edit.

    We exclude UserMessage.flags.historical rows since those users did
    not receive the message originally, and thus probably are not
    relevant for reprocessed alert_words, mentions and similar rendering
    features.  This may be a decision we change in the future.
    """
    rows = list(UserMessage.objects.filter(
        message=message_id,
        flags=~UserMessage.flags.historical,
    ).values('user_profile_id', 'flags'))

    mention_mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
    message_user_ids = set()
    mention_user_ids = set()
    for row in rows:
        user_id = row['user_profile_id']
        message_user_ids.add(user_id)
        if int(row['flags']) & mention_mask:
            mention_user_ids.add(user_id)

    return dict(
        message_user_ids=message_user_ids,
        mention_user_ids=mention_user_ids,
    )
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
    """Recompute mention/alert-word flags on UserMessage rows for `message`.

    Only rows whose flags actually changed are saved.
    """
    wildcard_mentioned = message.mentions_wildcard
    mentioned_ids = message.mentions_user_ids
    alert_word_ids = message.user_ids_with_alert_words
    changed: Set[UserMessage] = set()

    def sync_flag(um: UserMessage, should_set: bool, flag: int) -> None:
        # Flip the bit only when it disagrees with the desired state,
        # recording the row as dirty.
        currently_set = bool(um.flags & flag)
        if should_set == currently_set:
            return
        if should_set:
            um.flags |= flag
        else:
            um.flags &= ~flag
        changed.add(um)

    for um in ums:
        sync_flag(um, um.user_profile_id in alert_word_ids,
                  UserMessage.flags.has_alert_word)
        sync_flag(um, um.user_profile_id in mentioned_ids,
                  UserMessage.flags.mentioned)
        sync_flag(um, wildcard_mentioned,
                  UserMessage.flags.wildcard_mentioned)

    for um in changed:
        um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]:
    """Updates the message as stored in the to_dict cache (for serving
    messages).  Returns the ids of the messages that were re-cached."""
    fresh_dicts = MessageDict.to_dict_uncached(changed_messages, realm_id)

    message_ids = []
    items_for_remote_cache = {}
    for msg_id, msg in fresh_dicts.items():
        message_ids.append(msg_id)
        items_for_remote_cache[to_dict_cache_key_id(msg_id)] = (msg,)

    cache_set_many(items_for_remote_cache)
    return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
                            message: Message,
                            content: Optional[str],
                            rendered_content: Optional[str]) -> None:
    """Replace a message's stored content/rendering and notify recipients.

    Sends an update_message event to every user with a UserMessage row for
    this message.  No edit-history entry is recorded here (contrast with
    do_update_message).
    """
    event: Dict[str, Any] = {
        'type': 'update_message',
        'sender': user_profile.email,
        'message_id': message.id}
    changed_messages = [message]

    ums = UserMessage.objects.filter(message=message.id)

    if content is not None:
        update_user_message_flags(message, ums)
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = markdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content

    # Bug fix: rendered_content_version is assigned above but was missing
    # from update_fields, so the in-memory bump to markdown_version was
    # never written to the database.
    message.save(update_fields=["content", "rendered_content",
                                "rendered_content_version"])

    event['message_ids'] = update_to_dict_cache(changed_messages)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        # Per-recipient event metadata: user id plus their flags for this message.
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list(),
        }
    send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
    """Payload for a message-deletion client event.

    Declared with total=False, so every key is optional and senders
    populate only the fields relevant to the particular deletion.
    """
    type: str
    message_ids: List[int]
    message_type: str
    sender_id: int
    recipient_id: int
    topic: str
    stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message,
                      new_stream: Optional[Stream], topic_name: Optional[str],
                      propagate_mode: str, send_notification_to_old_thread: bool,
                      send_notification_to_new_thread: bool, content: Optional[str],
                      rendered_content: Optional[str], prior_mention_user_ids: Set[int],
                      mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int:
    """
    The main function for message editing.  A message edit event can
    modify:
    * the message's content (in which case the caller will have
      set both content and rendered_content),
    * the topic, in which case the caller will have set topic_name
    * the stream the message belongs to (new_stream)
    * or some combination of the above

    With topic edits, propagate_mode determines whether other messages
    in the topic also have their topics edited.

    Returns the number of messages changed (the edited message plus any
    other messages moved/renamed along with it by propagate_mode).
    """
    timestamp = timezone_now()
    message.last_edit_time = timestamp

    event: Dict[str, Any] = {
        'type': 'update_message',
        'user_id': user_profile.id,
        'edit_timestamp': datetime_to_timestamp(timestamp),
        'message_id': message.id,
    }

    edit_history_event: Dict[str, Any] = {
        'user_id': user_profile.id,
        'timestamp': event['edit_timestamp'],
    }

    changed_messages = [message]

    stream_being_edited = None
    if message.is_stream_message():
        stream_id = message.recipient.type_id
        stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm)
        event['stream_name'] = stream_being_edited.name

    ums = UserMessage.objects.filter(message=message.id)

    if content is not None:
        assert rendered_content is not None

        # mention_data is required if there's a content edit.
        assert mention_data is not None

        # add data from group mentions to mentions_user_ids.
        for group_id in message.mentions_user_group_ids:
            members = mention_data.get_group_members(group_id)
            message.mentions_user_ids.update(members)

        update_user_message_flags(message, ums)

        # One could imagine checking realm.allow_edit_history here and
        # modifying the events based on that setting, but doing so
        # doesn't really make sense.  We need to send the edit event
        # to clients regardless, and a client already had access to
        # the original/pre-edit content of the message anyway.  That
        # setting must be enforced on the client side, and making a
        # change here simply complicates the logic for clients parsing
        # edit history events.
        event['orig_content'] = message.content
        event['orig_rendered_content'] = message.rendered_content
        edit_history_event["prev_content"] = message.content
        edit_history_event["prev_rendered_content"] = message.rendered_content
        edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
        message.content = content
        message.rendered_content = rendered_content
        message.rendered_content_version = markdown_version
        event["content"] = content
        event["rendered_content"] = rendered_content
        event['prev_rendered_content_version'] = message.rendered_content_version
        event['is_me_message'] = Message.is_status_message(content, rendered_content)

        # message.has_image and message.has_link will have been
        # already updated by markdown rendering in the caller.
        message.has_attachment = check_attachment_reference_change(message)

        if message.is_stream_message():
            if topic_name is not None:
                new_topic_name = topic_name
            else:
                new_topic_name = message.topic_name()

            stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
                stream_id=stream_id,
                topic_name=new_topic_name,
            )
        else:
            stream_topic = None

        info = get_recipient_info(
            recipient=message.recipient,
            sender_id=message.sender_id,
            stream_topic=stream_topic,
            possible_wildcard_mention=mention_data.message_has_wildcards(),
        )

        event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
        event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
        event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
        event['prior_mention_user_ids'] = list(prior_mention_user_ids)
        event['mention_user_ids'] = list(mention_user_ids)
        event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
        if message.mentions_wildcard:
            event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
        else:
            event['wildcard_mention_user_ids'] = []

        do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids'])

    if topic_name is not None or new_stream is not None:
        orig_topic_name = message.topic_name()
        event["propagate_mode"] = propagate_mode
        event["stream_id"] = message.recipient.type_id

    if new_stream is not None:
        # Stream moves are only supported without a content edit.
        assert content is None
        assert message.is_stream_message()
        assert stream_being_edited is not None

        edit_history_event['prev_stream'] = stream_being_edited.id
        event[ORIG_TOPIC] = orig_topic_name
        message.recipient_id = new_stream.recipient_id

        event["new_stream_id"] = new_stream.id
        event["propagate_mode"] = propagate_mode

        # When messages are moved from one stream to another, some
        # users may lose access to those messages, including guest
        # users and users not subscribed to the new stream (if it is a
        # private stream).  For those users, their experience is as
        # though the messages were deleted, and we should send a
        # delete_message event to them instead.
        subscribers = get_active_subscriptions_for_stream_id(
            stream_id).select_related("user_profile")
        subs_to_new_stream = list(get_active_subscriptions_for_stream_id(
            new_stream.id).select_related("user_profile"))

        new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]

        # Get users who aren't subscribed to the new_stream.
        subs_losing_usermessages = [
            sub for sub in subscribers
            if sub.user_profile_id not in new_stream_sub_ids
        ]
        # Users who can no longer access the message without some action
        # from administrators.
        #
        # TODO: Extend this list to also contain users losing access
        # due to the messages moving to a private stream they are not
        # subscribed to.
        subs_losing_access = [
            sub for sub in subs_losing_usermessages
            if sub.user_profile.is_guest
        ]
        ums = ums.exclude(user_profile_id__in=[
            sub.user_profile_id for sub in subs_losing_usermessages])

    if topic_name is not None:
        topic_name = truncate_topic(topic_name)
        message.set_topic_name(topic_name)

        # These fields have legacy field names.
        event[ORIG_TOPIC] = orig_topic_name
        event[TOPIC_NAME] = topic_name
        event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name)
        edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name

    delete_event_notify_user_ids: List[int] = []
    if propagate_mode in ["change_later", "change_all"]:
        assert topic_name is not None or new_stream is not None
        messages_list = update_messages_for_topic_edit(
            message=message,
            propagate_mode=propagate_mode,
            orig_topic_name=orig_topic_name,
            topic_name=topic_name,
            new_stream=new_stream,
        )
        changed_messages += messages_list

        if new_stream is not None:
            assert stream_being_edited is not None
            message_ids = [msg.id for msg in changed_messages]
            # Delete UserMessage objects for users who will no
            # longer have access to these messages.  Note: This could be
            # very expensive, since it's N guest users x M messages.
            UserMessage.objects.filter(
                user_profile_id__in=[sub.user_profile_id for sub in
                                     subs_losing_usermessages],
                message_id__in=message_ids,
            ).delete()

            delete_event: DeleteMessagesEvent = {
                'type': 'delete_message',
                'message_ids': message_ids,
                'message_type': 'stream',
                'stream_id': stream_being_edited.id,
                'topic': orig_topic_name,
            }
            delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
            send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)

    # Prepend this edit to the message's edit history (most recent first).
    if message.edit_history is not None:
        edit_history = ujson.loads(message.edit_history)
        edit_history.insert(0, edit_history_event)
    else:
        edit_history = [edit_history_event]
    message.edit_history = ujson.dumps(edit_history)

    # This does message.save(update_fields=[...])
    save_message_for_edit_use_case(message=message)

    realm_id: Optional[int] = None
    if stream_being_edited is not None:
        realm_id = stream_being_edited.realm_id

    event['message_ids'] = update_to_dict_cache(changed_messages, realm_id)

    def user_info(um: UserMessage) -> Dict[str, Any]:
        return {
            'id': um.user_profile_id,
            'flags': um.flags_list(),
        }

    # The following block arranges that users who are subscribed to a
    # stream and can see history from before they subscribed get
    # live-update when old messages are edited (e.g. if the user does
    # a topic edit themself).
    #
    # We still don't send an update event to users who are not
    # subscribed to this stream and don't have a UserMessage row.  This
    # means if a non-subscriber is viewing the narrow, they won't get
    # a real-time updates.  This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expensive, and also not what we do for
    # newly sent messages anyway) and having magical live-updates
    # where possible.
    users_to_be_notified = list(map(user_info, ums))
    if stream_being_edited is not None:
        if stream_being_edited.is_history_public_to_subscribers:
            subscribers = get_active_subscriptions_for_stream_id(stream_id)
            # We exclude long-term idle users, since they by
            # definition have no active clients.
            subscribers = subscribers.exclude(user_profile__long_term_idle=True)
            # Remove duplicates by excluding the id of users already
            # in users_to_be_notified list.  This is the case where a
            # user both has a UserMessage row and is a current
            # Subscriber
            subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])

            if new_stream is not None:
                assert delete_event_notify_user_ids is not None
                subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids)

            # All users that are subscribed to the stream must be
            # notified when a message is edited
            subscriber_ids = [user.user_profile_id for user in subscribers]

            if new_stream is not None:
                # TODO: Guest users don't see the new moved topic
                # unless breadcrumb message for new stream is
                # enabled.  Excluding these users from receiving this
                # event helps us avoid an error traceback for our
                # clients.  We should figure out a way to inform the
                # guest users of this new topic if sending a 'message'
                # event for these messages is not an option.
                #
                # Don't send this event to guest subs who are not
                # subscribers of the old stream but are subscribed to
                # the new stream; clients will be confused.
                old_stream_unsubbed_guests = [
                    sub for sub in subs_to_new_stream
                    if sub.user_profile.is_guest
                    and sub.user_profile_id not in subscriber_ids
                ]
                subscribers = subscribers.exclude(user_profile_id__in=[
                    sub.user_profile_id for sub in old_stream_unsubbed_guests])
                subscriber_ids = [user.user_profile_id for user in subscribers]

            users_to_be_notified += list(map(subscriber_info, subscriber_ids))

    send_event(user_profile.realm, event, users_to_be_notified)

    if (len(changed_messages) > 0 and new_stream is not None and
            stream_being_edited is not None):
        # Notify users that the topic was moved.
        notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name,
                                   new_stream, topic_name, send_notification_to_old_thread,
                                   send_notification_to_new_thread)

    return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    # The messages passed here all belong to the same topic, or form a
    # single private message, as no other shape is possible with the
    # current callers of this method.
    messages = list(messages)
    message_ids = [msg.id for msg in messages]
    if not message_ids:
        return

    event: DeleteMessagesEvent = {
        'type': 'delete_message',
        'message_ids': message_ids,
    }

    sample_message = messages[0]
    if sample_message.is_stream_message():
        message_type = "stream"
        stream_id = sample_message.recipient.type_id
        event['stream_id'] = stream_id
        event['topic'] = sample_message.topic_name()
        recipients = get_active_subscriptions_for_stream_id(stream_id)
        # We exclude long-term idle users, since they by definition have no active clients.
        recipients = recipients.exclude(user_profile__long_term_idle=True)
        users_to_notify = [subscriber_info(sub.user_profile_id) for sub in recipients]
        archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
    else:
        assert len(messages) == 1
        message_type = "private"
        rows = UserMessage.objects.filter(message_id__in=message_ids)
        users_to_notify = [row.user_profile_id for row in rows]
        # TODO: We should plan to remove `sender_id` here.
        event['recipient_id'] = sample_message.recipient_id
        event['sender_id'] = sample_message.sender_id
        archiving_chunk_size = retention.MESSAGE_BATCH_SIZE

    move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)

    event['message_type'] = message_type
    send_event(realm, event, users_to_notify)
def do_delete_messages_by_sender(user: UserProfile) -> None:
    """Archive every message the given user has sent."""
    message_ids = list(
        Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id'))
    if not message_ids:
        return
    move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
    """Total message counts per stream over the last 28 days, keyed by
    stream id.  Streams with no recorded traffic are absent from the
    result."""
    stat = COUNT_STATS['messages_in_stream:is_bot:day']
    traffic_from = timezone_now() - datetime.timedelta(days=28)

    rows = StreamCount.objects.filter(
        property=stat.property,
        end_time__gt=traffic_from,
        stream_id__in=stream_ids,
    ).values('stream_id').annotate(value=Sum('value'))

    return {row["stream_id"]: row["value"] for row in rows}
def round_to_2_significant_digits(number: int) -> int:
    """Round a nonnegative integer to at most two significant digits."""
    # A d-digit number keeps its top two digits: round at position 2 - d.
    ndigits = 2 - len(str(number))
    return int(round(number, ndigits))
# Streams younger than this many days have too little data for a
# meaningful weekly traffic estimate (see get_average_weekly_stream_traffic).
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
                                      recent_traffic: Dict[int, int]) -> Optional[int]:
    """Estimate a stream's average weekly message traffic.

    recent_traffic maps stream id to message count (as produced by
    get_streams_traffic, covering the last 28 days).  Returns None for
    streams too young to estimate.
    """
    stream_traffic = recent_traffic.get(stream_id, 0)
    stream_age = (timezone_now() - stream_date_created).days

    if stream_age >= 28:
        # The traffic data covers a full four weeks.
        average_weekly_traffic = stream_traffic // 4
    elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
        # Extrapolate from the stream's lifetime so far.
        average_weekly_traffic = stream_traffic * 7 // stream_age
    else:
        return None

    # Don't round a small-but-nonzero traffic level down to zero.
    if average_weekly_traffic == 0 and stream_traffic > 0:
        average_weekly_traffic = 1

    return round_to_2_significant_digits(average_weekly_traffic)
# (subscribed, unsubscribed, never_subscribed) lists of stream dicts,
# the shape returned by gather_subscriptions_helper and get_web_public_subs.
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
    """Build the subscription data for the web-public view of a realm,
    simulating what a brand-new subscriber to every web-public stream
    would see."""
    num_colors = len(STREAM_ASSIGNMENT_COLORS)
    web_public_streams = Stream.objects.filter(
        realm=realm, is_web_public=True, deactivated=False)

    subscribed = []
    for idx, stream in enumerate(web_public_streams):
        stream_dict = stream.to_dict()

        # Add versions of the Subscription fields based on a simulated
        # new user subscription set.
        stream_dict['is_muted'] = False
        stream_dict['color'] = STREAM_ASSIGNMENT_COLORS[idx % num_colors]
        stream_dict['desktop_notifications'] = True
        stream_dict['audible_notifications'] = True
        stream_dict['push_notifications'] = True
        stream_dict['email_notifications'] = True
        stream_dict['pin_to_top'] = False
        # Pass empty traffic data: web-public viewers get no estimate.
        stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
            stream.id, stream.date_created, {})
        stream_dict['email_address'] = ''
        subscribed.append(stream_dict)

    return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact on page load for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
                                include_subscribers: bool=True) -> SubHelperT:
    """Fetch the user's stream data as three lists of stream dicts:
    (subscribed, unsubscribed, never_subscribed), each sorted by stream
    name.  Subscriber user-id lists are attached only where permitted
    and only when include_subscribers is True."""
    sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
        *Subscription.API_FIELDS, "recipient_id").order_by("recipient_id")

    sub_dicts = list(sub_dicts)
    sub_recipient_ids = [
        sub['recipient_id']
        for sub in sub_dicts
    ]
    stream_recipient = StreamRecipientMap()
    stream_recipient.populate_for_recipient_ids(sub_recipient_ids)

    # Annotate each subscription row with its stream id (Subscription
    # only stores a recipient_id).
    stream_ids: Set[int] = set()
    for sub in sub_dicts:
        sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
        stream_ids.add(sub['stream_id'])

    recent_traffic = get_streams_traffic(stream_ids=stream_ids)

    all_streams = get_active_streams(user_profile.realm).select_related(
        "realm").values(
            *Stream.API_FIELDS,
            # date_created is used as an input for the stream_weekly_traffic computed field.
            "date_created",
            # The realm_id and recipient_id are generally not needed in the API.
            "realm_id",
            "recipient_id",
            # email_token isn't public to some users with access to
            # the stream, so doesn't belong in API_FIELDS.
            "email_token")

    stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
    stream_hash = {}
    for stream in stream_dicts:
        stream_hash[stream["id"]] = stream

    all_streams_id = [stream["id"] for stream in all_streams]

    subscribed = []
    unsubscribed = []
    never_subscribed = []

    # Deactivated streams aren't in stream_hash.
    streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
               if sub["stream_id"] in stream_hash]
    streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts}

    # Add never subscribed streams to streams_subscribed_map
    streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})

    if include_subscribers:
        subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids(
            all_streams,
            user_profile,
            streams_subscribed_map,
            stream_recipient,
        )
    else:
        # If we're not including subscribers, always return None,
        # which the below code needs to check for anyway.
        subscriber_map = defaultdict(lambda: None)

    sub_unsub_stream_ids = set()
    for sub in sub_dicts:
        sub_unsub_stream_ids.add(sub["stream_id"])
        stream = stream_hash.get(sub["stream_id"])
        if not stream:
            # This stream has been deactivated, don't include it.
            continue

        # We first construct a dictionary based on the standard Stream
        # and Subscription models' API_FIELDS.
        stream_dict = {}
        for field_name in Stream.API_FIELDS:
            if field_name == "id":
                stream_dict['stream_id'] = stream["id"]
                continue
            stream_dict[field_name] = stream[field_name]

        # Copy Subscription.API_FIELDS except for "active", which is
        # used to determine where to put the field.
        for field_name in Subscription.API_FIELDS:
            stream_dict[field_name] = sub[field_name]

        # Backwards-compatibility for clients that haven't been
        # updated for the in_home_view => is_muted API migration.
        stream_dict['in_home_view'] = not stream_dict['is_muted']
        # Backwards-compatibility for clients that haven't been
        # updated for the is_announcement_only -> stream_post_policy
        # migration.
        stream_dict['is_announcement_only'] = \
            stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS

        # Add a few computed fields not directly from the data models.
        stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
            stream["id"], stream["date_created"], recent_traffic)
        stream_dict['email_address'] = encode_email_address_helper(
            stream["name"], stream["email_token"], show_sender=True)

        # Construct and add subscribers data
        subscribers: Optional[List[int]] = subscriber_map[stream["id"]]
        # Important: don't show the subscribers if the stream is invite only
        # and this user isn't on it anymore (or a realm administrator).
        if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
            subscribers = None
        # Guest users lose access to subscribers when they are unsubscribed.
        if not sub["active"] and user_profile.is_guest:
            subscribers = None
        if subscribers is not None:
            stream_dict['subscribers'] = subscribers

        # is_active is represented in this structure by which list we include it in.
        is_active = stream_dict.pop("active")
        if is_active:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)

    all_streams_id_set = set(all_streams_id)
    if user_profile.can_access_public_streams():
        never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
    else:
        never_subscribed_stream_ids = set()
    never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
                                if ns_stream_dict['id'] in never_subscribed_stream_ids]

    for stream in never_subscribed_streams:
        is_public = (not stream['invite_only'])
        if is_public or user_profile.is_realm_admin:
            stream_dict = {}
            for field_name in Stream.API_FIELDS:
                if field_name == "id":
                    stream_dict['stream_id'] = stream["id"]
                    continue
                stream_dict[field_name] = stream[field_name]

            stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
                stream["id"], stream["date_created"], recent_traffic)

            # Backwards-compatibility addition of removed field.
            stream_dict['is_announcement_only'] = \
                stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS

            if is_public or user_profile.is_realm_admin:
                subscribers = subscriber_map[stream["id"]]
                if subscribers is not None:
                    stream_dict['subscribers'] = subscribers
            never_subscribed.append(stream_dict)

    return (sorted(subscribed, key=lambda x: x['name']),
            sorted(unsubscribed, key=lambda x: x['name']),
            sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
        user_profile: UserProfile,
        include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Return (subscribed, unsubscribed) stream dicts for the user.

    When include_subscribers is True, each stream's 'subscribers' list
    is converted from user ids to sorted email addresses.
    """
    subscribed, unsubscribed, _ = gather_subscriptions_helper(
        user_profile, include_subscribers=include_subscribers)

    if include_subscribers:
        # Collect every user id appearing in any subscriber list, so
        # the email lookup can be done in one batch.
        user_ids = {
            subscriber
            for sub in subscribed + unsubscribed
            for subscriber in sub.get('subscribers', [])
        }
        email_dict = get_emails_from_user_ids(list(user_ids))

        for sub in subscribed + unsubscribed:
            if 'subscribers' in sub:
                sub['subscribers'] = sorted(
                    email_dict[user_id] for user_id in sub['subscribers'])

    return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
                                      sender_id: int,
                                      message_type: str,
                                      active_user_ids: Set[int],
                                      user_flags: Dict[int, List[str]]) -> List[int]:
    '''
    Given a list of active_user_ids, build up the subset of those users
    who fit these criteria:

        * They are likely to need notifications (either due
          to mentions, alert words, or being PM'ed).
        * They are no longer "present" according to the
          UserPresence table.
    '''

    if realm.presence_disabled:
        return []

    is_pm = message_type == 'private'

    def needs_notification(user_id: int) -> bool:
        flags: Iterable[str] = user_flags.get(user_id, [])
        if 'mentioned' in flags or 'wildcard_mentioned' in flags:
            return True
        if 'has_alert_word' in flags:
            return True
        # PM recipients (other than the sender) always qualify.
        return is_pm and user_id != sender_id

    user_ids = {user_id for user_id in active_user_ids if needs_notification(user_id)}

    return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
    # Given a set of user IDs (the recipients of a message), access the
    # UserPresence table to determine which of these users are currently
    # idle and should potentially get email notifications (and push
    # notifications with user_profile.enable_online_push_notifications=False).
    #
    # We exclude any presence data from ZulipMobile for the purpose of
    # triggering these notifications; the mobile app can more
    # effectively do its own client-side filtering of notification
    # sounds/etc. for the case that the user is actively doing a PM
    # conversation in the app.
    if not user_ids:
        return []

    # Matches the OFFLINE_THRESHOLD_SECS constant in presence.js.
    OFFLINE_THRESHOLD_SECS = 140
    recency_cutoff = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)

    present_rows = UserPresence.objects.filter(
        user_profile_id__in=user_ids,
        status=UserPresence.ACTIVE,
        timestamp__gte=recency_cutoff,
    ).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
    present_user_ids = {row['user_profile_id'] for row in present_rows}

    return sorted(user_ids - present_user_ids)
def do_send_confirmation_email(invitee: PreregistrationUser,
                               referrer: UserProfile) -> str:
    """
    Send the confirmation/welcome e-mail to an invited user.

    Returns the activation URL embedded in that email.
    """
    activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
    context = {
        'referrer_full_name': referrer.full_name,
        'referrer_email': referrer.delivery_email,
        'activate_url': activation_url,
        'referrer_realm_name': referrer.realm.name,
    }
    send_email(
        'zerver/emails/invitation',
        to_emails=[invitee.email],
        from_name=f"{referrer.full_name} (via Zulip)",
        from_address=FromAddress.tokenized_no_reply_address(),
        language=referrer.realm.default_language,
        context=context,
        realm=referrer.realm,
    )
    return activation_url
def email_not_system_bot(email: str) -> None:
    """Raise ValidationError if the address is reserved for a cross-realm
    system bot."""
    if not is_cross_realm_bot_email(email):
        return
    msg = email_reserved_for_system_bots_error(email)
    raise ValidationError(
        msg,
        code=msg,
        params=dict(deactivated=False),
    )
class InvitationError(JsonableError):
    """Raised when sending invitations fails, wholly or in part."""
    code = ErrorCode.INVITATION_FAILED
    data_fields = ['errors', 'sent_invitations']

    def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
                 sent_invitations: bool) -> None:
        # errors holds (email, error message, is_deactivated) triples.
        self.errors: List[Tuple[str, str, bool]] = errors
        self.sent_invitations: bool = sent_invitations
        self._msg: str = msg
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
    '''An upper bound on the number of invites sent in the last `days` days'''
    recent_invites = RealmCount.objects.filter(
        realm__in=realms,
        property='invites_sent::day',
        end_time__gte=timezone_now() - datetime.timedelta(days=days),
    ).aggregate(Sum('value'))['value__sum']
    # aggregate() returns None when no rows matched.
    return 0 if recent_invites is None else recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
    '''Discourage using invitation emails as a vector for carrying spam.'''
    msg = _("You do not have enough remaining invites. "
            "Please contact {email} to have your limit raised. "
            "No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR)
    if not settings.OPEN_REALM_CREATION:
        return

    # Per-realm daily cap.
    if num_invitees + estimate_recent_invites([realm], days=1) > realm.max_invites:
        raise InvitationError(msg, [], sent_invitations=False)

    default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
    newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
    if realm.date_created <= timezone_now() - newrealm_age:
        # If this isn't a "newly-created" realm, we're done.  The
        # remaining code applies an aggregate limit across all
        # "new" realms, to address sudden bursts of spam realms.
        return
    if realm.max_invites > default_max:
        # If a user is on a realm where we've bumped up
        # max_invites, then we exempt them from invite limits.
        return

    new_realms = Realm.objects.filter(
        date_created__gte=timezone_now() - newrealm_age,
        _max_invites__lte=default_max,
    ).all()

    for window_days, limit in settings.INVITES_NEW_REALM_LIMIT_DAYS:
        if num_invitees + estimate_recent_invites(new_realms, days=window_days) > limit:
            raise InvitationError(msg, [], sent_invitations=False)
def do_invite_users(user_profile: UserProfile,
                    invitee_emails: SizedTextIterable,
                    streams: Iterable[Stream],
                    invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
    """Validate invitee_emails and, for each valid address, create a
    PreregistrationUser and queue the invitation email.

    Raises InvitationError when rate limits are exceeded, when any
    email fails validation, or when every address had to be skipped.
    """
    check_invite_limit(user_profile.realm, len(invitee_emails))

    realm = user_profile.realm
    if not realm.invite_required:
        # Inhibit joining an open realm to send spam invitations.
        min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
        if (user_profile.date_joined > timezone_now() - min_age
                and not user_profile.is_realm_admin):
            raise InvitationError(
                _("Your account is too new to send invites for this organization. "
                  "Ask an organization admin, or a more experienced user."),
                [], sent_invitations=False)

    # Partition the addresses into syntactically/policy-valid ones and
    # per-address validation errors.
    good_emails: Set[str] = set()
    errors: List[Tuple[str, str, bool]] = []
    validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
    for email in invitee_emails:
        if email == '':
            continue
        email_error = validate_email_is_valid(
            email,
            validate_email_allowed_in_realm,
        )

        if email_error:
            errors.append((email, email_error, False))
        else:
            good_emails.add(email)

    '''
    good_emails are emails that look ok so far,
    but we still need to make sure they're not
    gonna conflict with existing users
    '''
    error_dict = get_existing_user_errors(user_profile.realm, good_emails)

    skipped: List[Tuple[str, str, bool]] = []
    for email in error_dict:
        msg, deactivated = error_dict[email]
        skipped.append((email, msg, deactivated))
        good_emails.remove(email)

    validated_emails = list(good_emails)

    if errors:
        raise InvitationError(
            _("Some emails did not validate, so we didn't send any invitations."),
            errors + skipped, sent_invitations=False)

    if skipped and len(skipped) == len(invitee_emails):
        # All e-mails were skipped, so we didn't actually invite anyone.
        raise InvitationError(_("We weren't able to invite anyone."),
                              skipped, sent_invitations=False)

    # We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than keeping track of
    # when exactly invitations were sent
    do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
                              None, timezone_now(), increment=len(validated_emails))

    # Now that we are past all the possible errors, we actually create
    # the PreregistrationUser objects and trigger the email invitations.
    for email in validated_emails:
        # The logged in user is the referrer.
        prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
                                          invited_as=invite_as,
                                          realm=user_profile.realm)
        prereg_user.save()
        stream_ids = [stream.id for stream in streams]
        prereg_user.streams.set(stream_ids)

        event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
        queue_json_publish("invites", event)

    if skipped:
        raise InvitationError(_("Some of those addresses are already using Zulip, "
                                "so we didn't send them an invitation. We did send "
                                "invitations to everyone else!"),
                              skipped, sent_invitations=True)
    notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
    """List pending invitations visible to this user.

    Realm administrators see all of the realm's invitations, including
    multiuse invite links; other users see only their own single-use
    invitations.
    """
    if user_profile.is_realm_admin:
        prereg_query = PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
    else:
        prereg_query = PreregistrationUser.objects.filter(referred_by=user_profile)
    prereg_users = filter_to_valid_prereg_users(prereg_query)

    invites = [
        dict(email=invitee.email,
             invited_by_user_id=invitee.referred_by.id,
             invited=datetime_to_timestamp(invitee.invited_at),
             id=invitee.id,
             invited_as=invitee.invited_as,
             is_multiuse=False)
        for invitee in prereg_users
    ]

    if not user_profile.is_realm_admin:
        # We do not return multiuse invites to non-admin users.
        return invites

    lowest_datetime = timezone_now() - datetime.timedelta(
        days=settings.INVITATION_LINK_VALIDITY_DAYS)
    multiuse_confirmation_objs = Confirmation.objects.filter(
        realm=user_profile.realm,
        type=Confirmation.MULTIUSE_INVITE,
        date_sent__gte=lowest_datetime)
    for confirmation_obj in multiuse_confirmation_objs:
        invite = confirmation_obj.content_object
        invites.append(dict(invited_by_user_id=invite.referred_by.id,
                            invited=datetime_to_timestamp(confirmation_obj.date_sent),
                            id=invite.id,
                            link_url=confirmation_url(confirmation_obj.confirmation_key,
                                                      user_profile.realm,
                                                      Confirmation.MULTIUSE_INVITE),
                            invited_as=invite.invited_as,
                            is_multiuse=True))
    return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
                                   streams: Optional[Sequence[Stream]] = None) -> str:
    """Create a reusable invitation link for the referrer's realm.

    streams: optional streams new users will be auto-subscribed to;
    defaults to none.  Returns the confirmation URL for the link.
    """
    # Fix: the previous default of `[]` was a mutable default argument;
    # use the None sentinel instead (falsy, so `if streams:` behaves
    # identically for callers relying on the old default).
    realm = referred_by.realm
    invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
    if streams:
        invite.streams.set(streams)
    invite.invited_as = invited_as
    invite.save()
    notify_invites_changed(referred_by)
    return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
    """Revoke a pending single-use invitation."""
    email = prereg_user.email

    # Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
    # to a "revoked" status so that we can give the invited user a better
    # error message.
    prereg_content_type = ContentType.objects.get_for_model(PreregistrationUser)
    Confirmation.objects.filter(
        content_type=prereg_content_type,
        object_id=prereg_user.id,
    ).delete()
    prereg_user.delete()
    clear_scheduled_invitation_emails(email)
    notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
    """Revoke a multiuse invitation link and notify the inviter's clients."""
    invite_content_type = ContentType.objects.get_for_model(MultiuseInvite)
    Confirmation.objects.filter(
        content_type=invite_content_type,
        object_id=multiuse_invite.id,
    ).delete()
    multiuse_invite.delete()
    notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    """Re-send an invitation email and return the new invite timestamp."""
    # These fields are guaranteed non-None by the caller's code path;
    # the asserts exist to narrow the Optional types for mypy.
    assert prereg_user.referred_by is not None
    assert prereg_user.realm is not None
    check_invite_limit(prereg_user.referred_by.realm, 1)
    prereg_user.invited_at = timezone_now()
    prereg_user.save()
    do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
                              None, prereg_user.invited_at)
    clear_scheduled_invitation_emails(prereg_user.email)
    # The custom email body isn't stored anywhere, so it is always None here.
    queue_json_publish("invites",
                       {"prereg_id": prereg_user.id,
                        "referrer_id": prereg_user.referred_by.id,
                        "email_body": None})
    return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
    """Broadcast the realm's full emoji set to all of its active users."""
    send_event(
        realm,
        dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji()),
        active_user_ids(realm.id),
    )
def check_add_realm_emoji(realm: Realm,
                          name: str,
                          author: UserProfile,
                          image_file: File) -> Optional[RealmEmoji]:
    """Validate, upload, and register a new realm emoji.

    Returns the saved RealmEmoji on success, or None if uploading the
    image failed (the partially-created database row is cleaned up).

    Raises ValidationError (from full_clean) for an invalid name.
    """
    realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
    realm_emoji.full_clean()
    realm_emoji.save()
    emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
    # The only user-controlled portion of 'emoji_file_name' is an extension,
    # which can not contain '..' or '/' or '\', making it difficult to exploit
    emoji_file_name = mark_sanitized(emoji_file_name)
    # Bug fix: the previous version used `return` statements inside a
    # `finally` block, which silently discarded any exception raised by
    # upload_emoji_image.  An explicit except keeps the same observable
    # contract (failed upload => row deleted, None returned) without
    # also swallowing BaseExceptions such as KeyboardInterrupt.
    try:
        upload_emoji_image(image_file, emoji_file_name, author)
    except Exception:
        realm_emoji.delete()
        return None
    realm_emoji.file_name = emoji_file_name
    realm_emoji.save(update_fields=['file_name'])
    notify_realm_emoji(realm_emoji.realm)
    return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
    """Soft-delete a realm emoji by name and notify clients.

    Raises RealmEmoji.DoesNotExist if no active emoji has that name.
    """
    target = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
    target.deactivated = True
    target.save(update_fields=['deactivated'])
    notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
    """Push the user's current alert-word list to their own clients."""
    send_event(user_profile.realm,
               dict(type="alert_words", alert_words=words),
               [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Add alert words for a user and push the updated list to clients."""
    updated_words = add_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, updated_words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
    """Remove alert words for a user and push the updated list to clients."""
    remaining_words = remove_user_alert_words(user_profile, alert_words)
    notify_alert_words(user_profile, remaining_words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str,
                  date_muted: Optional[datetime.datetime]=None) -> None:
    """Mute a topic for a user and push their full mute list to clients."""
    muted_at = timezone_now() if date_muted is None else date_muted
    add_topic_mute(user_profile, stream.id, recipient.id, topic, muted_at)
    send_event(user_profile.realm,
               dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)),
               [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
    """Unmute a topic for a user and push their full mute list to clients."""
    remove_topic_mute(user_profile, stream.id, topic)
    send_event(user_profile.realm,
               dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile)),
               [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
    """Record that the user has seen a tutorial hotspot; idempotent."""
    UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
    send_event(user.realm,
               dict(type="hotspots", hotspots=get_next_hotspots(user)),
               [user.id])
def notify_realm_filters(realm: Realm) -> None:
    """Broadcast the realm's linkifier (realm filter) list to active users."""
    send_event(
        realm,
        dict(type="realm_filters", realm_filters=realm_filters_for_realm(realm.id)),
        active_user_ids(realm.id),
    )
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
#   * Named groups will be converted to numbered groups automatically
#   * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
    """Create a linkifier (realm filter) and notify clients.

    Returns the new filter's id.  Raises ValidationError (via full_clean)
    for an invalid pattern or URL format string.
    """
    new_filter = RealmFilter(
        realm=realm,
        pattern=pattern.strip(),
        url_format_string=url_format_string.strip(),
    )
    new_filter.full_clean()
    new_filter.save()
    notify_realm_filters(realm)
    return new_filter.id
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
                           id: Optional[int]=None) -> None:
    """Delete a linkifier, selected by pattern (preferred) or primary key.

    Raises RealmFilter.DoesNotExist if no matching filter is found.
    """
    if pattern is None:
        RealmFilter.objects.get(realm=realm, pk=id).delete()
    else:
        RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
    notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
    """Map user ids to email addresses.

    We may eventually use memcached to speed this up, but the DB is fast.
    """
    return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
    """Allow a new email domain for the realm and notify clients."""
    realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
                                              allow_subdomains=allow_subdomains)
    add_event = dict(
        type="realm_domains",
        op="add",
        realm_domain=dict(
            domain=realm_domain.domain,
            allow_subdomains=realm_domain.allow_subdomains,
        ),
    )
    send_event(realm, add_event, active_user_ids(realm.id))
    return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
    """Toggle the allow_subdomains flag on an existing realm domain."""
    realm_domain.allow_subdomains = allow_subdomains
    realm_domain.save(update_fields=['allow_subdomains'])
    change_event = dict(
        type="realm_domains",
        op="change",
        realm_domain=dict(
            domain=realm_domain.domain,
            allow_subdomains=realm_domain.allow_subdomains,
        ),
    )
    send_event(realm_domain.realm, change_event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None:
    """Remove an allowed email domain from the realm and notify clients."""
    realm = realm_domain.realm
    removed_domain = realm_domain.domain
    realm_domain.delete()
    no_domains_left = not RealmDomain.objects.filter(realm=realm).exists()
    if no_domains_left and realm.emails_restricted_to_domains:
        # If this was the last realm domain, we mark the realm as no
        # longer restricted to domain, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
        do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user)
    send_event(realm,
               dict(type="realm_domains", op="remove", domain=removed_domain),
               active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
    """Get streams with subscribers."""
    # TODO: Make a generic stub for QuerySet
    has_active_subscriber = Exists(
        Subscription.objects.filter(
            active=True,
            user_profile__is_active=True,
            user_profile__realm=realm,
            recipient_id=OuterRef('recipient_id'),
        ),
    )
    return (
        Stream.objects.filter(realm=realm, deactivated=False)
        .annotate(occupied=has_active_subscriber)
        .filter(occupied=True)
    )
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
    """Return client dicts for the realm's active web-public streams."""
    web_public_query = Stream.objects.filter(
        realm=realm, deactivated=False, is_web_public=True)
    return Stream.get_client_data(web_public_query)
def do_get_streams(
        user_profile: UserProfile, include_public: bool=True,
        include_subscribed: bool=True, include_all_active: bool=False,
        include_default: bool=False, include_owner_subscribed: bool=False,
) -> List[Dict[str, Any]]:
    """Return client-facing stream dicts visible to user_profile.

    The include_* flags select which stream sources are unioned
    together.  include_all_active (every occupied stream in the realm)
    is restricted to API super users; include_default annotates each
    dict with whether it is a realm default stream.  Results are
    sorted by stream name.
    """
    if include_all_active and not user_profile.is_api_super_user:
        raise JsonableError(_("User not authorized for this query"))
    # Guests (and others without public-stream access) never see public streams.
    include_public = include_public and user_profile.can_access_public_streams()
    # Start out with all streams in the realm with subscribers
    query = get_occupied_streams(user_profile.realm)
    if include_all_active:
        streams = Stream.get_client_data(query)
    else:
        # We construct a query as the or (|) of the various sources
        # this user requested streams from.
        query_filter: Optional[Q] = None
        def add_filter_option(option: Q) -> None:
            # OR the new source's filter into the accumulated filter.
            nonlocal query_filter
            if query_filter is None:
                query_filter = option
            else:
                query_filter |= option
        if include_subscribed:
            subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
            recipient_check = Q(id__in=set(subscribed_stream_ids))
            add_filter_option(recipient_check)
        if include_public:
            invite_only_check = Q(invite_only=False)
            add_filter_option(invite_only_check)
        if include_owner_subscribed and user_profile.is_bot:
            # Bots can optionally see the streams their owner is subscribed to.
            bot_owner = user_profile.bot_owner
            assert bot_owner is not None
            owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
            owner_subscribed_check = Q(id__in=set(owner_stream_ids))
            add_filter_option(owner_subscribed_check)
        if query_filter is not None:
            query = query.filter(query_filter)
            streams = Stream.get_client_data(query)
        else:
            # Don't bother going to the database with no valid sources
            streams = []
    streams.sort(key=lambda elt: elt["name"])
    if include_default:
        is_default = {}
        default_streams = get_default_streams_for_realm(user_profile.realm_id)
        for default_stream in default_streams:
            is_default[default_stream.id] = True
        for stream in streams:
            stream['is_default'] = is_default.get(stream["stream_id"], False)
    return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
                             attachment_dict: Dict[str, Any]) -> None:
    """Notify the owner's clients about an attachment change (add/update/remove),
    including the realm's current total upload-space usage."""
    attachment_event = {
        'type': 'attachment',
        'op': op,
        'attachment': attachment_dict,
        "upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
    }
    send_event(user_profile.realm, attachment_event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
    """Associate uploaded files referenced in `message` with the message.

    Returns True if at least one attachment was claimed.
    """
    claimed = False
    for path_id in potential_path_ids:
        user_profile = message.sender
        is_message_realm_public = False
        if message.is_stream_message():
            is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
        if not validate_attachment_request(user_profile, path_id):
            # Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that
            #   actually exists. validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have permission to
            #   access themselves. validate_attachment_request will return False.
            #
            # Either case is unusual and suggests a UI bug that got
            # the user in this situation, so we log in these cases.
            logging.warning(
                "User %s tried to share upload %s in message %s, but lacks permission",
                user_profile.id, path_id, message.id,
            )
            continue
        claimed = True
        attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
        notify_attachment_update(user_profile, "update", attachment.to_dict())
    return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
    """Purge attachments unclaimed for at least `weeks_ago` weeks, deleting
    both the stored file and the database row."""
    for stale_attachment in get_old_unclaimed_attachments(weeks_ago):
        delete_message_image(stale_attachment.path_id)
        stale_attachment.delete()
def check_attachment_reference_change(message: Message) -> bool:
    """Sync Attachment rows with an edited (not-yet-saved) message.

    Returns whether the message still references any attachments.
    """
    # For a unsaved message edit (message.* has been updated, but not
    # saved to the database), adjusts Attachment data to correspond to
    # the new content.
    prev_attachments = {a.path_id for a in message.attachment_set.all()}
    new_attachments = set(message.potential_attachment_path_ids)
    if new_attachments == prev_attachments:
        # No reference changed; report whether any attachments exist.
        return bool(prev_attachments)
    to_remove = list(prev_attachments - new_attachments)
    if len(to_remove) > 0:
        # select_for_update locks the rows so a concurrent edit can't
        # race with the m2m removal below.
        attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
        message.attachment_set.remove(*attachments_to_update)
    to_add = list(new_attachments - prev_attachments)
    if len(to_add) > 0:
        do_claim_attachments(message, to_add)
    return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
    """Broadcast the realm's full custom-profile-field list to active users."""
    current_fields = custom_profile_fields_for_realm(realm.id)
    send_event(
        realm,
        dict(type="custom_profile_fields",
             op=operation,
             fields=[f.as_dict() for f in current_fields]),
        active_user_ids(realm.id),
    )
def try_add_realm_default_custom_profile_field(realm: Realm,
                                               field_subtype: str) -> CustomProfileField:
    """Create one of the built-in external-account profile fields
    (e.g. GitHub/Twitter) for a realm and notify clients."""
    subtype_info = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
    field = CustomProfileField(
        realm=realm,
        name=subtype_info['name'],
        field_type=CustomProfileField.EXTERNAL_ACCOUNT,
        hint=subtype_info['hint'],
        field_data=ujson.dumps(dict(subtype=field_subtype)),
    )
    field.save()
    # New fields are appended: order defaults to the row id.
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
                                       hint: str='',
                                       field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
    """Create a custom profile field for a realm and notify clients."""
    field = CustomProfileField(realm=realm, name=name, field_type=field_type)
    field.hint = hint
    # Only CHOICE and EXTERNAL_ACCOUNT fields carry structured field_data.
    if field.field_type in (CustomProfileField.CHOICE,
                            CustomProfileField.EXTERNAL_ACCOUNT):
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    # New fields are appended: order defaults to the row id.
    field.order = field.id
    field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'add')
    return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
    """Delete a custom profile field and notify clients.

    Deleting a field will also delete the user profile data
    associated with it in CustomProfileFieldValue model.
    """
    field.delete()
    notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
    """Bulk-delete every custom profile field in the realm (no client event)."""
    CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
                                          name: str, hint: str='',
                                          field_data: Optional[ProfileFieldData]=None) -> None:
    """Update a custom profile field's name/hint/data and notify clients."""
    field.name = name
    field.hint = hint
    # Only CHOICE and EXTERNAL_ACCOUNT fields carry structured field_data.
    if field.field_type in (CustomProfileField.CHOICE,
                            CustomProfileField.EXTERNAL_ACCOUNT):
        field.field_data = ujson.dumps(field_data or {})
    field.save()
    notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    """Re-order the realm's custom profile fields.

    `order` is the full list of field ids in their new display order;
    raises JsonableError if it does not cover every field in the realm.
    """
    position_by_field_id = {field_id: position
                            for position, field_id in enumerate(order)}
    realm_fields = CustomProfileField.objects.filter(realm=realm)
    # Validate the mapping first so we never partially apply an ordering.
    for field in realm_fields:
        if field.id not in position_by_field_id:
            raise JsonableError(_("Invalid order mapping."))
    for field in realm_fields:
        field.order = position_by_field_id[field.id]
        field.save(update_fields=['order'])
    notify_realm_custom_profile_fields(realm, 'update')
def notify_user_update_custom_profile_data(user_profile: UserProfile,
                                           field: Dict[str, Union[int, str, List[int], None]]) -> None:
    """Broadcast a single custom-profile-field value change for a user."""
    data = dict(id=field['id'])
    # USER-type fields store a list of user ids; serialize it for clients.
    if field['type'] == CustomProfileField.USER:
        data["value"] = ujson.dumps(field['value'])
    else:
        data['value'] = field['value']
    if field['rendered_value']:
        data['rendered_value'] = field['rendered_value']
    send_event(
        user_profile.realm,
        dict(type="realm_user", op="update",
             person=dict(user_id=user_profile.id, custom_profile_field=data)),
        active_user_ids(user_profile.realm.id),
    )
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
                                                  data: List[Dict[str, Union[int, str, List[int]]]],
                                                  ) -> None:
    """Write new custom-profile-field values for a user, skipping no-ops.

    Each entry in `data` has keys 'id' (the field id) and 'value'.  All
    writes happen inside one transaction; a client event is sent per
    field that actually changed.
    """
    with transaction.atomic():
        for field in data:
            field_value, created = CustomProfileFieldValue.objects.get_or_create(
                user_profile=user_profile,
                field_id=field['id'])
            if not created and field_value.value == str(field['value']):
                # If the field value isn't actually being changed to a different one,
                # and always_notify is disabled, we have nothing to do here for this field.
                # Note: field_value.value is a TextField() so we need to cast field['value']
                # to a string for the comparison in this if.
                continue
            field_value.value = field['value']
            if field_value.field.is_renderable():
                # Renderable fields (e.g. long text) also store a markdown-rendered copy.
                field_value.rendered_value = render_stream_description(str(field['value']))
                field_value.save(update_fields=['value', 'rendered_value'])
            else:
                field_value.save(update_fields=['value'])
            notify_user_update_custom_profile_data(user_profile, {
                "id": field_value.field_id,
                "value": field_value.value,
                "rendered_value": field_value.rendered_value,
                "type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
    """Clear a user's value for one custom profile field and notify clients.

    Raises JsonableError if the field doesn't exist in the user's realm;
    silently succeeds if the user simply had no value for it.
    """
    try:
        field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
        CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile).delete()
        notify_user_update_custom_profile_data(user_profile, {'id': field_id,
                                                              'value': None,
                                                              'rendered_value': None,
                                                              'type': field.field_type})
    except CustomProfileField.DoesNotExist:
        raise JsonableError(_('Field id {id} not found.').format(id=field_id))
    except CustomProfileFieldValue.DoesNotExist:
        # Nothing stored for this field; removal is a no-op.
        pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
    """Broadcast a newly created user group to the realm's active users."""
    group_payload = dict(
        name=user_group.name,
        members=[member.id for member in members],
        description=user_group.description,
        id=user_group.id,
    )
    send_event(user_group.realm,
               dict(type="user_group", op="add", group=group_payload),
               active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
                         description: str) -> None:
    """Create a user group, raising JsonableError if the name is taken."""
    try:
        new_group = create_user_group(name, initial_members, realm, description=description)
    except django.db.utils.IntegrityError:
        # The unique constraint on (realm, name) fired.
        raise JsonableError(_("User group '{}' already exists.").format(name))
    do_send_create_user_group_event(new_group, initial_members)
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
    """Broadcast a user-group attribute change to the realm's active users."""
    send_event(user_group.realm,
               dict(type="user_group", op='update', group_id=user_group.id, data=data),
               active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
    """Rename a user group, raising JsonableError on a name collision."""
    user_group.name = name
    try:
        user_group.save(update_fields=['name'])
    except django.db.utils.IntegrityError:
        # The unique constraint on (realm, name) fired.
        raise JsonableError(_("User group '{}' already exists.").format(name))
    do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
    """Change a user group's description and notify clients."""
    user_group.description = description
    user_group.save(update_fields=['description'])
    do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
                                       service_interface: int,
                                       service_payload_url: str) -> None:
    """Update an outgoing-webhook bot's service URL/interface and notify owners."""
    # TODO: First service is chosen because currently one bot can only have one service.
    # Update this once multiple services are supported.
    service = get_bot_services(bot_profile.id)[0]
    service.base_url = service_payload_url
    service.interface = service_interface
    service.save()
    service_payload = dict(base_url=service.base_url,
                           interface=service.interface,
                           token=service.token)
    update_event = dict(
        type='realm_bot',
        op='update',
        bot=dict(user_id=bot_profile.id, services=[service_payload]),
    )
    send_event(bot_profile.realm, update_event, bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
                              config_data: Dict[str, str]) -> None:
    """Merge new config entries into an embedded bot's config and notify owners."""
    for key, value in config_data.items():
        set_bot_config(bot_profile, key, value)
    updated_config_data = get_bot_config(bot_profile)
    update_event = dict(
        type='realm_bot',
        op='update',
        bot=dict(user_id=bot_profile.id,
                 services=[dict(config_data=updated_config_data)]),
    )
    send_event(bot_profile.realm, update_event, bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
    """Return the service descriptors (webhook or embedded) for one bot."""
    user_profile = get_user_profile_by_id(user_profile_id)
    services = get_bot_services(user_profile_id)
    service_dicts: List[Dict[str, Any]] = []
    if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
        service_dicts = [
            {'base_url': service.base_url,
             'interface': service.interface,
             'token': service.token,
             }
            for service in services
        ]
    elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
        try:
            service_dicts = [
                {'config_data': get_bot_config(user_profile),
                 'service_name': services[0].name,
                 }
            ]
        except ConfigError:
            # A ConfigError just means that there are no config entries for user_profile.
            pass
    return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
                               realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
    """Bulk variant of get_service_dicts_for_bot: map bot id -> service dicts.

    Fetches all Service rows and embedded-bot configs in two queries to
    avoid per-bot database round trips.
    """
    bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
    bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
    for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
        bot_services_by_uid[service.user_profile_id].append(service)
    embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
                        if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
    embedded_bot_configs = get_bot_configs(embedded_bot_ids)
    service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
    for bot_dict in bot_dicts:
        bot_profile_id = bot_dict["id"]
        bot_type = bot_dict["bot_type"]
        services = bot_services_by_uid[bot_profile_id]
        service_dicts: List[Dict[str, Any]] = []
        if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
            service_dicts = [{'base_url': service.base_url,
                              'interface': service.interface,
                              'token': service.token,
                              }
                             for service in services]
        elif bot_type == UserProfile.EMBEDDED_BOT:
            # Embedded bots without a stored config fall through with an
            # empty service list.
            if bot_profile_id in embedded_bot_configs.keys():
                bot_config = embedded_bot_configs[bot_profile_id]
                # NOTE(review): services[0] assumes every configured embedded
                # bot has at least one Service row — confirm that invariant.
                service_dicts = [{'config_data': bot_config,
                                  'service_name': services[0].name,
                                  }]
        service_dicts_by_uid[bot_profile_id] = service_dicts
    return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
                        include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
    """Return client-facing dicts for the bots this user may manage.

    Realm admins (by default) see every bot in the realm; everyone else
    sees only the bots they own.
    """
    if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
        result = get_bot_dicts_in_realm(user_profile.realm)
    else:
        result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
                                            bot_owner=user_profile).values(*bot_dict_fields)
    services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
    bot_dicts = []
    for botdict in result:
        bot_dicts.append({
            'email': botdict['email'],
            'user_id': botdict['id'],
            'full_name': botdict['full_name'],
            'bot_type': botdict['bot_type'],
            'is_active': botdict['is_active'],
            'api_key': botdict['api_key'],
            'default_sending_stream': botdict['default_sending_stream__name'],
            'default_events_register_stream': botdict['default_events_register_stream__name'],
            'default_all_public_streams': botdict['default_all_public_streams'],
            'owner_id': botdict['bot_owner__id'],
            'avatar_url': avatar_url_from_dict(botdict),
            'services': services_by_ids[botdict['id']],
        })
    return bot_dicts
def do_send_user_group_members_update_event(event_name: str,
                                            user_group: UserGroup,
                                            user_ids: List[int]) -> None:
    """Broadcast a membership change ('add_members'/'remove_members') for a group."""
    membership_event = dict(
        type="user_group",
        op=event_name,
        group_id=user_group.id,
        user_ids=user_ids,
    )
    send_event(user_group.realm, membership_event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
                                   user_profiles: List[UserProfile]) -> None:
    """Add users to a group in one bulk insert, then notify clients."""
    UserGroupMembership.objects.bulk_create(
        UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile)
        for user_profile in user_profiles
    )
    added_ids = [user_profile.id for user_profile in user_profiles]
    do_send_user_group_members_update_event('add_members', user_group, added_ids)
def remove_members_from_user_group(user_group: UserGroup,
                                   user_profiles: List[UserProfile]) -> None:
    """Remove users from a group in one bulk delete, then notify clients."""
    UserGroupMembership.objects.filter(
        user_group_id=user_group.id,
        user_profile__in=user_profiles,
    ).delete()
    removed_ids = [user_profile.id for user_profile in user_profiles]
    do_send_user_group_members_update_event('remove_members', user_group, removed_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
                                    realm_id: int) -> None:
    """Broadcast the removal of a user group to the realm's active users."""
    send_event(
        realm,
        dict(type="user_group", op="remove", group_id=user_group_id),
        active_user_ids(realm_id),
    )
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
    """Delete a user group after an access check, then notify clients.

    access_user_group_by_id raises if user_profile may not modify the group.
    """
    group = access_user_group_by_id(user_group_id, user_profile)
    group.delete()
    do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm) -> None:
    """Email the realm's admins a confirmation link to reactivate the realm."""
    confirmation_url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
    language = realm.default_language
    send_email_to_admins(
        'zerver/emails/realm_reactivation', realm,
        from_address=FromAddress.tokenized_no_reply_address(),
        from_name=FromAddress.security_email_from_name(language=language),
        language=language,
        context={'confirmation_url': confirmation_url,
                 'realm_uri': realm.uri,
                 'realm_name': realm.name},
    )
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
    """Store (or clear, with None) the user's Zoom OAuth token and notify
    their clients whether a token is now present."""
    user.zoom_token = token
    user.save(update_fields=["zoom_token"])
    has_token_event = dict(type="has_zoom_token", value=token is not None)
    send_event(user.realm, has_token_event, [user.id])
def notify_realm_export(user_profile: UserProfile) -> None:
    """Push the user's realm-export status list to their clients."""
    # In the future, we may want to send this event to all realm admins.
    send_event(
        user_profile.realm,
        dict(type='realm_export',
             exports=get_realm_exports_serialized(user_profile)),
        [user_profile.id],
    )
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
    """Delete a realm export's tarball and mark its audit-log row as deleted."""
    # Give mypy a hint so it knows `ujson.loads`
    # isn't being passed an `Optional[str]`.
    export_extra_data = export.extra_data
    assert export_extra_data is not None
    export_data = ujson.loads(export_extra_data)
    export_path = export_data.get('export_path')
    if export_path:
        # Allow removal even if the export failed.
        delete_export_tarball(export_path)
    export_data['deleted_timestamp'] = timezone_now().timestamp()
    export.extra_data = ujson.dumps(export_data)
    export.save(update_fields=['extra_data'])
    notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream,
                       topic_name: str) -> List[Message]:
    """Return the user's messages in a stream topic, oldest first."""
    user_messages = UserMessage.objects.filter(
        user_profile=user_profile,
        message__recipient=stream.recipient,
    ).order_by("id")
    matching = filter_by_topic_name_via_message(user_messages, topic_name)
    return [user_message.message for user_message in matching]
| zerver/lib/actions.py | 257,820 | The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
Create a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
sub_dict maps stream_id => whether the user is subscribed to that stream.
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
Verifies that the user's proposed full name is valid. The caller
is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace).
Discourage using invitation emails as a vector for carrying spam.
See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
Takes in a UserProfile object, the name of a global notification
preference to update, and the value to update to
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
Send the confirmation/welcome e-mail to an invited user.
See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
Takes in a realm object, the name of an attribute to update, the
value to update, and the user who initiated the update.
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
With topic edits, propagate_mode determines whether other messages
also have their topics edited.
An upper bound on the number of invites sent in the last `days` days
Given a list of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications (either due
to mentions, alert words, or being PM'ed).
* They are no longer "present" according to the
UserPresence table.
Only includes users on the explicit message to line
Get streams with subscribers
altered_user_ids is the user_ids that we are adding/removing
subscribed_user_ids is the already-subscribed user_ids
Based on stream policy, we notify the correct bystanders, while
not notifying altered_users (who get subscribers via another event)
Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
See _internal_prep_message for details of how this works.
See _internal_prep_message for details of how this works.
See _internal_prep_message for details of how this works.
If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue.
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
Updates the message as stored in the to_dict cache (for serving
messages).
Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
This will be used to type annotate parameters in a function if the function works on both str and unicode in python 2 but in python 3 it only works on str. Store an event in the log for re-importing messages return user ids of users who can access the attributes of a stream, such as its name/description. For a public stream, this is everyone in the realm except unsubscribed guest users for a private stream, it's subscribers plus realm admins. TODO: Find similar queries elsewhere and de-duplicate this code. This one-liner helps us work around a lint rule. Send notification to realm signup notifications stream if it exists Don't send notification for the first user in a realm We also send a notification to the Zulip administrative realm Check whether the stream exists We intentionally use the same strings as above to avoid translation burden. If the signups stream hasn't been created in the admin realm, don't auto-create it to send to it; just do nothing. Handle the race condition where a message arrives between bulk_add_subscriptions above and the Message query just above Mark the newest ONBOARDING_UNREAD_MESSAGES as unread. Does the processing for a new user account: * Subscribes to default/invitation streams * Fills in some recent historical messages * Notifies other users in realm and Zulip about the signup * Deactivates PreregistrationUser objects * subscribe the user to newsletter if newsletter_data is specified If the user's invitation didn't explicitly list some streams, we add the default streams mit_beta_users don't have a referred_by field This is a cross-realm private message. Mark any other PreregistrationUsers that are STATUS_ACTIVE as inactive so we can keep track of the PreregistrationUser we actually used for analytics Clear any scheduled invitation emails to prevent them from being sent after the user is created. We have an import loop here; it's intentional, because we want to keep all the onboarding code in zerver/lib/onboarding.py. 
If the user was created automatically via the API, we may not want to register them for the newsletter Since we don't know what the client supports at this point in the code, we just assume client_gravatar and user_avatar_url_field_optional = False :( We assume there's no custom profile field data for a new user; initial values are expected to be added in a later event. Set the owner key only when the bot has an owner. The default bots don't have an owner. So don't set the owner key while reactivating them. Note that for bots, the caller will send an additional event with bot-specific info like services. Unlike do_activate_user, this is meant for re-activating existing users, so it doesn't reset their password, etc. We use real email addresses on UserProfile.email only if EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so changes between values that will not require changing that field, so we can save work and return here. TODO: Design a bulk event for this or force-reload all clients Don't deactivate the users, but do delete their sessions so they get bumped to the login screen, where they'll get a realm deactivation notice when they try to log in. nocoverage For zephyr mirror users, we need to make them a mirror dummy again; otherwise, other users won't get the correct behavior when trying to send messages to this person inside Zulip. Ideally, we need to also ensure their zephyr mirroring bot isn't running, but that's a separate issue. Get the affected user ids *before* we deactivate everybody. Preserve as much as possible the original stream name while giving it a special prefix that both indicates that the stream is deactivated and frees up the original name for reuse. This stream has already been deactivated, keep prepending !s until we have a unique stream name or you've hit a rename limit. If you don't have a unique name at this point, this will fail later in the code path. 
If this is a default stream, remove it, properly sending a notification to browser clients. Remove the old stream information from remote cache. We notify just the target user (and eventually org admins, only when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS) about their new delivery email, since that field is private. If the user is using Gravatar to manage their email address, their Gravatar just changed, and we need to notify other clients. Additionally, if we're also changing the publicly visible email, we send a new_email event as well. Forge a user for this person The sender and recipient may be the same id, so de-duplicate using a set. Anybody calling us w/r/t a stream message needs to supply stream_topic. We may eventually want to have different versions of this function for different message types. This implements the structure that the UserProfile stream notification settings are defaults, which can be overridden by the stream-level settings (if those values are not null). Note: muting a stream overrides stream_push_notify Note: muting a stream overrides stream_email_notify If there's a possible wildcard mention, we need to determine which users would receive a wildcard mention notification for this message should the message indeed contain a wildcard mention. We don't have separate values for push/email notifications here; at this stage, we're just determining whether this wildcard mention should be treated as a mention (and follow the user's mention notification preferences) or a normal message. Important note: Because we haven't rendered markdown yet, we don't yet know which of these possibly-mentioned users was actually mentioned in the message (in other words, the mention syntax might have been in a code block or otherwise escaped). 
`get_ids_for` will filter these extra user rows for our data structures not related to bots query_for_ids is fast highly optimized for large queries, and we need this codepath to be fast (it's part of sending messages) TODO: We should always have at least one user_id as a recipient of any message we send. Right now the exception to this rule is `notify_new_user`, which, at least in a possibly contrived test scenario, can attempt to send messages to an inactive bot. When we plug that hole, we can avoid this `else` clause and just `assert(user_ids)`. UPDATE: It's February 2020 (and a couple years after the above comment was written). We have simplified notify_new_user so that it should be a little easier to reason about. There is currently some cleanup to how we handle cross realm bots that is still under development. Once that effort is complete, we should be able to address this to-do. Service bots don't get UserMessage rows. These two bot data structures need to filter from the full set of users who either are receiving the message or might have been mentioned in it, and so can't use get_ids_for. Further in the do_send_messages code path, once `mentioned_user_ids` has been computed via markdown, we'll filter these data structures for just those users who are either a direct recipient or were mentioned; for now, we're just making sure we have the data we need for that without extra database queries. Avoid infinite loops by preventing messages sent by bots from generating Service events. Important note: service_bot_tuples may contain service bots who were not actually mentioned in the message (e.g. if mention syntax for that bot appeared in a code block). Thus, it is important to filter any users who aren't part of either mentioned_user_ids (the actual mentioned users) or active_user_ids (the actual recipients). So even though this is implied by the logic below, we filter these not-actually-mentioned users here, to help keep this function future-proof. 
Mention triggers, for stream messages PM triggers for personal and huddle messages Filter out messages which didn't pass internal_prep_message properly Filter out zephyr mirror anomalies where the message was already sent For consistency, changes to the default values for these gets should also be applied to the default args in do_send_message Render our messages. Add members of the mentioned user groups into `mentions_user_ids`. Only send data to Tornado about wildcard mentions if message rendering determined the message had an actual wildcard mention in it (and not e.g. wildcard mention syntax inside a code block). Save the message receipts in the database Claim attachments in message Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows; they will be processed later. assert needed because stubs for django are missing Deliver events to the real-time push system, as well as enqueuing any additional processing triggered by the message. Note: This is where authorization for single-stream get_updates happens! We only attach stream data to the notify new_message request if it's a public stream, ensuring that in the tornado server, non-public stream messages are only associated to their subscribed users. assert needed because stubs for django are missing Note that this does not preserve the order of message ids returned. In practice, this shouldn't matter, as we only mirror single zephyr messages at a time and don't otherwise intermingle sending zephyr messages with other messages. These properties on the Message are set via render_markdown by code in the markdown inline patterns For long_term_idle (aka soft-deactivated) users, we are allowed to optimize by lazily not creating UserMessage rows that would have the default 0 flag set (since the soft-reactivation logic knows how to create those when the user comes back). 
We need to create the UserMessage rows for these long_term_idle users non-lazily in a few cases: * There are nonzero flags (e.g. the user was mentioned), since that case is rare and this saves a lot of complexity in soft-reactivation. * If the user is going to be notified (e.g. they get push/email notifications for every message on a stream), since in that case the notifications code will call `access_message` on the message to re-verify permissions, and for private streams, will get an error if the UserMessage row doesn't exist yet. See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.htmlsoft-deactivation for details on this system. TODO: We plan to remove this redundant user_dict object once clients are updated to support accessing use user_id. See https://github.com/zulip/zulip/pull/14711 for details. Update the cached message since new reaction is added. Recipients for message update events, including reactions, are everyone who got the original message. This means reactions won't live-update in preview narrows, but it's the right performance tradeoff, since otherwise we'd need to send all reactions to public stream messages to every browser for every client in the organization, which doesn't scale. However, to ensure that reactions do live-update for any user who has actually participated in reacting to a message, we add a "historical" UserMessage row for any user who reacts to message, subscribing them to future notifications. nocoverage This can happen when a race results in the check in views code not catching an attempt to double-add a reaction, or perhaps if the emoji_name/emoji_code mapping is busted. 
Include a list of recipients in the event body to help identify where the typing is happening Only deliver the notification to active user recipients check_send_typing_notification: Checks the typing notification and sends it If any of the user_ids being sent in are invalid, we will just reject the whole request, since a partial list of user_ids can create confusion related to huddles. Plus it's a good sign that a client is confused (or possibly even malicious) if we get bad user_ids. We include cross-bot realms as possible recipients, so that clients can know which huddle conversation is relevant here. Avoid mutating the passed in list of recipient_profiles. In our mirroring integrations with some third-party protocols, bots subscribed to the third-party protocol forward to Zulip messages that they received in the third-party service. The permissions model for that forwarding is that users can only submit to Zulip private messages they personally received, and here we do the check for whether forwarder_user_profile is among the private message recipients of the message. If the private message is just between the sender and another person, force it to be a personal internally Otherwise, we need a huddle. Make sure the sender is included in huddle messages We exempt cross-realm bots from the check that all the recipients are in the same realm. For huddle messages, we use a 10-second window because the timestamps aren't guaranteed to actually match between two copies of the same message. Users can pass stream name as either an id or a name, and if they choose to pass a name, they may JSON encode it for legacy reasons. If there was no JSON encoding, then we just have a raw stream name. We should stop supporting this odd use case once we improve our documentation. nocoverage We had a JSON-encoded stream name. We had a stream id. We try to accept multiple incoming formats for recipients. See test_extract_recipients() for examples of what we allow. 
We don't complain about empty message recipients here check_send_message: Returns the id of the sent message. Has same argspec as check_message. Don't send these notifications for cross-realm bot messages (e.g. from EMAIL_GATEWAY_BOT) since the owner for EMAIL_GATEWAY_BOT is probably the server administrator, not the owner of the bot who could potentially fix the problem. We warn the user once every 5 minutes to avoid a flood of PMs on a misconfigured integration, re-using the UserProfile.last_reminder field, which is not used for bots. We allow PMs only between users and bots, to avoid breaking the tutorial as well as automated notifications from system bots to users. check_message: Returns message ready for sending with do_send_message on success or the error message (string) on error. This will raise JsonableError if there are problems. API Super-users who set the `forged` flag are allowed to forge messages sent by any user, so we disable the `forwarded_mirror_message` security check in that case. This is defensive code--Addressee already validates the message type. Forged messages come with a timestamp We render messages later in the process. Remove any null bytes from the content If we have a stream name, and the stream doesn't exist, we create it here (though this code path should probably be removed eventually, moving that responsibility to the caller). If addressee.stream_name() is None (i.e. we're sending to a stream by ID), we skip this, as the stream object must already exist. These colors are shared with the palette in subs.js. We use a lambda here so that we only compute whether the user is subscribed if we have to Guest users can access subscribed public stream's subscribers We could put an AssertionError here; in that we don't have any code paths that would allow a guest user to access other streams in the first place. Organization administrators can view subscribers for all streams. 
TODO: Make a generic stub for QuerySet Note that non-active users may still have "active" subscriptions, because we want to be able to easily reactivate them with their old subscriptions. This is why the query here has to look at the UserProfile.is_active flag. Skip the "active" field, it's implied by context Send a notification to the user who subscribed. PRIVATE STREAMS Realm admins can access all private stream subscribers. Send them an event even if they aren't subscribed to stream. PUBLIC STREAMS We now do "peer_add" or "peer_remove" events even for streams users were never subscribed to, in order for the neversubscribed structure to stay up-to-date. We generally use this function to populate RealmAuditLog, and the max id here is actually systemwide, not per-realm. I assume there's some advantage in not filtering by realm. During initial realm creation, there might be 0 messages in the database; in that case, the `aggregate` query returns None. Since we want an int for "beginning of time", use -1. Mark the sub as active, without saving, so that pick_color will consider this to be an active subscription when picking colors TODO: XXX: This transaction really needs to be done at the serializeable transaction isolation level. Log Subscription Activities in RealmAuditLog Now since we have all log objects generated we can do a bulk insert Notify all existing users on streams that users have joined First, get all users subscribed to the streams that we care about We fetch all subscription information upfront, as it's used throughout the following code and we want to minize DB queries We now send several types of events to notify browsers. The first batch is notifications to users on invite-only streams that the stream exists. Users newly added to invite-only streams need a `create` notification. The former, because they need the stream to exist before they get the "subscribe" notification, and the latter so they can manage the new stream. 
Realm admins already have all created private streams. The second batch is events for the users themselves that they were subscribed to the new streams. The second batch is events for other users who are tracking the subscribers lists of streams in their browser; everyone for public streams and only existing subscribers for private streams. nocoverage Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming) to avoid spurious duplicates. This loop just flattens out our data into big lists for bulk operations. TODO: XXX: This transaction really needs to be done at the serializeable transaction isolation level. Log Subscription Activities in RealmAuditLog Now since we have all log objects generated we can do a bulk insert Deactivate any newly-vacant private streams For this property, is_muted is used in the database, but in_home_view in the API, since we haven't migrated the events API to the new name yet. Our web app will try to patch full_name even if the user didn't modify the name in the form. We just silently ignore those situations. Can't use update_fields because of how the foreign key works. For admins, update event is sent instead of delete/add event. bot_data of admin contains all the bots and none of them should be removed/(added again). Delete the bot from previous owner's bot data. Do not send update event for previous bot owner. Notify the new owner that the bot has been added. Do not send update event for bot_owner. Since `bot_owner_id` is included in the user profile dict we need to update the users dict with the new bot owner id We need to explicitly delete the old API key from our caches, because the on-save handler for flushing the UserProfile object in zerver/lib/cache.py only has access to the new API key. Even clients using client_gravatar don't need the email, since we're sending the URL anyway. type: ignore[assignment] Apparent mypy bug with Optional[int] setter. 
Backwards-compatibility code: We removed the is_announcement_only property in early 2020, but we send a duplicate event for legacy mobile clients that might want the data. Update the display recipient and stream, which are easy single items to set. Delete cache entries for everything else, which is cheaper and clearer than trying to set them. display_recipient is the out of date field in all cases. We will tell our users to essentially update stream.name = new_name where name = old_name and update stream.email = new_email where name = old_name. We could optimize this by trying to send one message, but the client code really wants one property update at a time, and updating stream names is a pretty infrequent operation. More importantly, we want to key these updates by id, not name, since id is the immutable primary key, and obviously name is not. Even though the token doesn't change, the web client needs to update the email forwarding address to display the correctly-escaped new name. Create stream once Realm object has been saved With the current initial streams situation, the only public stream is the notifications_stream. Log the event Send a notification to the admin realm nocoverage If the signups stream hasn't been created in the admin realm, don't auto-create it to send to it; just do nothing. Disabling digest emails should clear a user's email queue Updates to the timezone display setting are sent to all users Right now default streams are realm-wide. This wrapper gives us flexibility to some day further customize how we set up default streams for new users. 
returns default streams in json serializeable format This code isn't perfect, because with various races we might end up creating two overlapping intervals, but that shouldn't happen often, and can be corrected for in post-processing There are two ways our intervals could overlap: (1) The start of the new interval could be inside the old interval (2) The end of the new interval could be inside the old interval In either case, we just extend the old interval to include the new interval. Otherwise, the intervals don't overlap, so we should make a new one The web app reports a client as 'website' The desktop app reports a client as ZulipDesktop due to it setting a custom user agent. We want both to count as web users Alias ZulipDesktop to website If an object was created, it has already been saved. We suppress changes from ACTIVE to IDLE before stale_status is reached; this protects us from the user having two clients open: one active, the other idle. Without this check, we would constantly toggle their status between the two states. The following block attempts to only update the "status" field in the event that it actually changed. This is important to avoid flushing the UserPresence cache when the data it would return to a client hasn't actually changed (see the UserPresence post_save hook for details). Push event to all users in the realm so they see the new user appear in the presence list immediately, or the newly online user without delay. Note that we won't send an update here for a timestamp update, because we rely on the browser to ping us every 50 seconds for realm-wide status updates, and those updates should have recent timestamps, which means the browser won't think active users have gone idle. If we were more aggressive in this function about sending timestamp updates, we could eliminate the ping responses, but that's not a high priority for now, considering that most of our non-MIT realms are pretty small. First, we clear mobile push notifications. 
This is safer in the event that the below logic times out and we're killed. we don't send messages, since the client reloads anyway Called during the message edit code path to remove mobile push notifications for users who are no longer mentioned following the edit. See 15428 for details. A perfect implementation would also support updating the message in a sent notification if a message was edited to mention a group rather than a user (or vise versa), though it is likely not worth the effort to do such a change. This function supports clearing notifications for several users only for the message-edit use case where we'll have a single message_id. This next block allows you to star any message, even those you didn't receive (e.g. because you're looking at a public stream you're not subscribed to, etc.). The problem is that starring is a flag boolean on UserMessage, and UserMessage rows are normally created only when you receive a message to support searching your personal history. So we need to create one. We add UserMessage.flags.historical, so that features that need "messages you actually received" can exclude these UserMessages. Validate that the user could have read the relevant message OK, this is a message that you legitimately have access to via narrowing to the stream it is on, even though you didn't actually receive it. So we create a historical, read UserMessage message row for you to star. Since moving content between streams is highly disruptive, it's worth adding a couple tombstone messages showing what happened. Send a notification to the old stream that the topic was moved. We exclude UserMessage.flags.historical rows since those users did not receive the message originally, and thus probably are not relevant for reprocessed alert_words, mentions and similar rendering features. This may be a decision we change in the future. We use transaction.atomic to support select_for_update in the attachment codepath. 
We use transaction.atomic to support select_for_update in the attachment codepath. mention_data is required if there's a content edit. add data from group mentions to mentions_user_ids. One could imagine checking realm.allow_edit_history here and modifying the events based on that setting, but doing so doesn't really make sense. We need to send the edit event to clients regardless, and a client already had access to the original/pre-edit content of the message anyway. That setting must be enforced on the client side, and making a change here simply complicates the logic for clients parsing edit history events. message.has_image and message.has_link will have been already updated by markdown rendering in the caller. When messages are moved from one stream to another, some users may lose access to those messages, including guest users and users not subscribed to the new stream (if it is a private stream). For those users, their experience is as though the messages were deleted, and we should send a delete_message event to them instead. Get users who aren't subscribed to the new_stream. Users who can longer access the message without some action from administrators. TODO: Extend this list to also contain users losing access due to the messages moving to a private stream they are not subscribed to. These fields have legacy field names. Delete UserMessage objects for users who will no longer have access to these messages. Note: This could be very expensive, since it's N guest users x M messages. This does message.save(update_fields=[...]) The following blocks arranges that users who are subscribed to a stream and can see history from before they subscribed get live-update when old messages are edited (e.g. if the user does a topic edit themself). We still don't send an update event to users who are not subscribed to this stream and don't have a UserMessage row. This means if a non-subscriber is viewing the narrow, they won't get a real-time updates. 
This is a balance between sending message-edit notifications for every public stream to every user in the organization (too expansive, and also not what we do for newly sent messages anyway) and having magical live-updates where possible. We exclude long-term idle users, since they by definition have no active clients. Remove duplicates by excluding the id of users already in users_to_be_notified list. This is the case where a user both has a UserMessage row and is a current Subscriber All users that are subscribed to the stream must be notified when a message is edited TODO: Guest users don't see the new moved topic unless breadcrumb message for new stream is enabled. Excluding these users from receiving this event helps us avoid a error trackeback for our clients. We should figure out a way to inform the guest users of this new topic if sending a 'message' event for these messages is not an option. Don't send this event to guest subs who are not subscribers of the old stream but are subscribed to the new stream; clients will be confused. Notify users that the topic was moved. messages in delete_message event belong to the same topic or is a single private message, as any other behaviour is not possible with the current callers to this method. TODO: We should plan to remove `sender_id` here. We exclude long-term idle users, since they by definition have no active clients. Add versions of the Subscription fields based on a simulated new user subscription set. In general, it's better to avoid using .values() because it makes the code pretty ugly, but in this case, it has significant performance impact for loading / for users with large numbers of subscriptions, so it's worth optimizing. date_created is used as an input for the stream_weekly_traffic computed field. The realm_id and recipient_id are generally not needed in the API. email_token isn't public to some users with access to the stream, so doesn't belong in API_FIELDS. 
Deactivated streams aren't in stream_hash. Add never subscribed streams to streams_subscribed_map If we're not including subscribers, always return None, which the below code needs to check for anyway. This stream has been deactivated, don't include it. We first construct a dictionary based on the standard Stream and Subscription models' API_FIELDS. Copy Subscription.API_FIELDS except for "active", which is used to determine where to the put the field. Backwards-compatibility for clients that haven't been updated for the in_home_view => is_muted API migration. Backwards-compatibility for clients that haven't been updated for the is_announcement_only -> stream_post_policy migration. Add a few computed fields not directly from the data models. Construct and add subscribers data Important: don't show the subscribers if the stream is invite only and this user isn't on it anymore (or a realm administrator). Guest users lose access to subscribers when they are unsubscribed. is_active is represented in this structure by which list we include it in. Backwards-compatibility addition of removed field. Given a set of user IDs (the recipients of a message), accesses the UserPresence table to determine which of these users are currently idle and should potentially get email notifications (and push notifications with with user_profile.enable_online_push_notifications=False). We exclude any presence data from ZulipMobile for the purpose of triggering these notifications; the mobile app can more effectively do its own client-side filtering of notification sounds/etc. for the case that the user is actively doing a PM conversation in the app. Matches presence.js constant If this isn't a "newly-created" realm, we're done. The remaining code applies an aggregate limit across all "new" realms, to address sudden bursts of spam realms. If a user is on a realm where we've bumped up max_invites, then we exempt them from invite limits. Inhibit joining an open realm to send spam invitations. 
All e-mails were skipped, so we didn't actually invite anyone. We do this here rather than in the invite queue processor since this is used for rate limiting invitations, rather than keeping track of when exactly invitations were sent Now that we are past all the possible errors, we actually create the PreregistrationUser objects and trigger the email invitations. The logged in user is the referrer. We do not return multiuse invites to non-admin users. Delete both the confirmation objects and the prereg_user object. TODO: Probably we actually want to set the confirmation objects to a "revoked" status so that we can give the invited user a better error message. These are two structurally for the caller's code path. We don't store the custom email body, so just set it to None The only user-controlled portion of 'emoji_file_name' is an extension, which can not contain '..' or '/' or '\', making it difficult to exploit NOTE: Regexes must be simple enough that they can be easily translated to JavaScript RegExp syntax. In addition to JS-compatible syntax, the following features are available: * Named groups will be converted to numbered groups automatically * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags We may eventually use memcached to speed this up, but the DB is fast. If this was the last realm domain, we mark the realm as no longer restricted to domain, because the feature doesn't do anything if there are no domains, and this is probably less confusing than the alternative. TODO: Make a generic stub for QuerySet Start out with all streams in the realm with subscribers We construct a query as the or (|) of the various sources this user requested streams from. Don't bother going to the database with no valid sources Technically, there are 2 cases here: * The user put something in their message that has the form of an upload, but doesn't correspond to a file that doesn't exist. validate_attachment_request will return None. 
* The user is trying to send a link to a file they don't have permission to access themselves. validate_attachment_request will return False. Either case is unusual and suggests a UI bug that got the user in this situation, so we log in these cases. For a unsaved message edit (message.* has been updated, but not saved to the database), adjusts Attachment data to correspond to the new content. If the field value isn't actually being changed to a different one, and always_notify is disabled, we have nothing to do here for this field. Note: field_value.value is a TextField() so we need to cast field['value'] to a string for the comparison in this if. TODO: First service is chosen because currently one bot can only have one service. Update this once multiple services are supported. A ConfigError just means that there are no config entries for user_profile. In the future, we may want to send this event to all realm admins. Give mypy a hint so it knows `ujson.loads` isn't being passed an `Optional[str]`. Allow removal even if the export failed. | 38,781 | en | 0.922326 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from msccl.collectives import *
from msccl.algorithm import *
from msccl.instance import *
from msccl.topologies import *
def _alltoall_subproblem(local_nodes, num_copies):
    """Build the collective for an alltoall subproblem: `local_nodes` real
    ranks plus one extra rank that aggregates the other (num_copies - 1)
    copies of the local topology into a single "remote" node.

    The chunk index space is split into three contiguous ranges:
      [0, local_end)                 purely local (src, dst) pairs
      [local_end, remote_out_end)    chunks leaving via the remote node
      [remote_out_end, num_chunks)   chunks arriving from the remote node

    Returns the collective produced by build_collective.
    """
    # The aggregated remote node gets the rank index just past the locals.
    remote_node = local_nodes
    # One local chunk per (source, destination) pair of local ranks.
    local_end = local_nodes * local_nodes
    # One outgoing and one incoming chunk per (copy, source, destination)
    # triple involving each of the other (num_copies - 1) topology copies.
    num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
    remote_out_end = local_end + num_remote_pairs
    num_chunks = remote_out_end + num_remote_pairs
    def cases(chunk, local,remote_out,remote_in):
        # Dispatch on which of the three ranges the chunk falls in, passing
        # the chunk's offset within that range to the chosen callback.
        if chunk < local_end:
            return local(chunk)
        elif chunk < remote_out_end:
            return remote_out(chunk - local_end)
        else:
            return remote_in(chunk - remote_out_end)
    def pre(rank, chunk):
        # Precondition: where each chunk starts.
        # Local chunks start at their source rank (c % local_nodes).
        # Outgoing remote chunks encode (copy, src, dst) with
        # c // (num_copies - 1) == src + local_nodes * dst, so the source is
        # (c // (num_copies - 1)) % local_nodes.
        # Incoming remote chunks start at the aggregated remote node.
        return cases(chunk,
            lambda c: rank == c % local_nodes,
            lambda c: rank == (c // (num_copies - 1)) % local_nodes,
            lambda c: rank == remote_node)
    def post(rank, chunk):
        # Postcondition: where each chunk must end up (mirror of pre):
        # local chunks at their destination (c // local_nodes), outgoing
        # remote chunks at the remote node, and incoming remote chunks at
        # their local destination ((c // (num_copies - 1)) // local_nodes).
        return cases(chunk,
            lambda c: rank == c // local_nodes,
            lambda c: rank == remote_node,
            lambda c: rank == (c // (num_copies - 1)) // local_nodes)
    def trigger(rank, chunk):
        # On the remote node, pair each outgoing chunk with its matching
        # incoming chunk (the two ranges are parallel, offset by
        # num_remote_pairs), so delivering one triggers the other.
        # Local chunks and other ranks have no trigger.
        if rank == remote_node:
            return cases(chunk,
                lambda c: None,
                lambda c: chunk + num_remote_pairs,
                lambda c: chunk - num_remote_pairs)
        else:
            return None
    return build_collective(f'AlltoallSubproblem(n={local_nodes},copies={num_copies})',
        local_nodes + 1, num_chunks,
        pre, post, trigger=trigger)
def make_alltoall_subproblem_collective_and_topology(topology, num_copies, relay_nodes, bw = 1, share_bw = False):
    """Construct the subproblem collective and topology for a distributed Alltoall.

    The returned topology contains all ranks of ``topology`` plus one extra rank
    that represents the remote copies; only ``relay_nodes`` are connected to it,
    each with bandwidth ``bw``.

    :param topology: the local topology to extend
    :param num_copies: number of copies of the local topology in the distributed setting
    :param relay_nodes: local ranks that connect to the other copies
    :param bw: bandwidth of each relay link to/from the remote stand-in rank
    :param share_bw: if True, constrain all relays to share ``bw`` via switches
    :return: a ``(collective, topology)`` pair describing the subproblem
    """
    local_nodes = topology.num_nodes()
    remote_node = local_nodes
    # Copy the local links (note the links matrix is indexed [dst][src]) and
    # attach the relay nodes to the remote stand-in rank.
    links = [[0 for _ in range(local_nodes + 1)] for _ in range(local_nodes + 1)]
    for src in range(local_nodes):
        for dst in range(local_nodes):
            links[dst][src] = topology.link(src, dst)
    for relay in relay_nodes:
        links[remote_node][relay] = bw
        links[relay][remote_node] = bw
    switches = topology.switches.copy()
    if share_bw:
        # BUG FIX: the original referenced an undefined name `num_nodes` here;
        # the switch endpoint is the remote stand-in rank (remote_node).
        switches.append((relay_nodes, [remote_node], bw, 'remote_out'))
        switches.append(([remote_node], relay_nodes, bw, 'remote_in'))
    collective = _alltoall_subproblem(local_nodes, num_copies)
    # BUG FIX: pass the extended `switches` list. The original passed
    # `topology.switches`, which silently dropped the share_bw constraints.
    topology = Topology(f'Subtopo(local={topology.name},relays=({",".join(str(i) for i in relay_nodes)}))', links, switches)
    return collective, topology
def synthesize_alltoall_subproblem(subproblem_algo, num_copies, logging=False):
    """Stitch ``num_copies`` instances of a subproblem Alltoall algorithm into a
    full Alltoall algorithm over ``local_nodes * num_copies`` ranks.

    The subproblem algorithm must have been synthesized for the collective and
    topology produced by ``make_alltoall_subproblem_collective_and_topology``.
    Sends to/from the remote stand-in rank in different copies are paired up
    into direct cross-copy sends; an unpaired send/recv is an error.

    :param subproblem_algo: a non-pipelined algorithm for the subproblem collective
    :param num_copies: number of copies of the local topology to stitch together
    :param logging: unused here; kept for interface consistency with other distributors
    :return: an Algorithm implementing Alltoall on the stitched topology
    """
    if subproblem_algo.is_pipelined():
        raise ValueError('Pipelining is not supported.')
    local_topology = subproblem_algo.topology
    chunks = subproblem_algo.instance.chunks
    # The last rank of the subproblem topology is the remote stand-in node.
    local_nodes = local_topology.num_nodes() - 1
    remote_node = local_nodes
    nodes = local_nodes * num_copies
    collective = alltoall(nodes).chunk_up(chunks)
    # Create a distributed topology where copies of relay nodes that connect to the remote node in the subproblem
    # topology are connected to all the relay nodes in the other copies.
    links = [[0 for _ in range(nodes)] for _ in range(nodes)]
    for dst in range(nodes):
        for src in range(nodes):
            local_src = src % local_nodes
            local_dst = dst % local_nodes
            if src // local_nodes != dst // local_nodes:
                # Cross-copy link: limited by the weaker of the two relay hops.
                bw = min(local_topology.link(local_src, remote_node), local_topology.link(remote_node, local_dst))
                links[dst][src] = bw
            else:
                links[dst][src] = local_topology.link(local_src, local_dst)
    # Also make copies of switches with a similar expansion of the remote node into the nodes of other copies.
    switches = []
    for srcs, dsts, bw, name in local_topology.switches:
        for i in range(num_copies):
            def to_dist(ranks):
                for rank in ranks:
                    if rank < remote_node:
                        # Non-remote nodes are just translated to the distributed numbering of ranks.
                        yield rank + i * local_nodes
                    else:
                        # Include all remote nodes in the switch. This is fine because the links already limit
                        # connectivity to just the relay nodes.
                        for r in range(nodes):
                            if r // local_nodes != i:
                                yield r
            dist_srcs = list(to_dist(srcs))
            dist_dsts = list(to_dist(dsts))
            switches.append((dist_srcs, dist_dsts, bw, f'copy_{i}_{name}_local'))
    # NOTE(review): "Stiched" is a typo, but it is part of the topology's runtime
    # name; kept as-is for backward compatibility.
    topology = Topology(f'Stiched(sub={local_topology.name},copies={num_copies})', links, switches)
    def nth_chunk_for_pair(src, dst, idx):
        # The following chunk calculation respects both the _scattered and _transpose
        # pre/postconditions in Alltoall. When substituting it in:
        # -the precondition (chunk % self.num_nodes) simplifies to src
        # -the postcondition ((chunk // self.num_nodes) % self.num_nodes) simplifies to dst
        return (src + dst * collective.num_nodes) * chunks + idx
    steps = []
    # Calculate the ranges of the differently handled chunks
    local_end = local_nodes * local_nodes
    num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
    remote_out_end = local_end + num_remote_pairs
    num_chunks = remote_out_end + num_remote_pairs
    for local_step in subproblem_algo.steps:
        sends = []
        # These are used to track operations involving remote nodes that get matched with another operation in the same
        # step.
        unmatched_sends = {}
        unmatched_recvs = {}
        # Stitch together copies of the subproblem algorithm
        for chunk, src, dst in local_step.sends:
            for i in range(num_copies):
                def to_dist(rank):
                    # Translates ranks from the local to the distributed topology
                    return rank + i * local_nodes
                def other_start(c):
                    # Given a relative remote chunk return local rank 0 in the copy it corresponds to
                    other_i = c % (num_copies - 1)
                    if other_i >= i:
                        other_i += 1
                    return other_i * local_nodes
                # Calculate origin and target ranks that match the Alltoall pre/postconditions
                if chunk < local_end:
                    # Purely local chunk: neither endpoint may be the remote stand-in.
                    assert src != remote_node and dst != remote_node
                    origin = to_dist((chunk // chunks) % local_nodes)
                    target = to_dist((chunk // chunks) // local_nodes)
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(origin % local_nodes, chunk)
                    assert subproblem_algo.collective.postcondition(target % local_nodes, chunk)
                elif chunk < remote_out_end:
                    # Outgoing remote chunk: originates in this copy, targets another copy.
                    c = chunk - local_end
                    local_origin = ((c // chunks) // (num_copies - 1)) % local_nodes
                    origin = to_dist(local_origin)
                    target = other_start(c) + ((c // (num_copies - 1))) // local_nodes
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(local_origin, chunk)
                    assert subproblem_algo.collective.postcondition(target % local_nodes, chunk + num_remote_pairs)
                else:
                    # Incoming remote chunk: originates in another copy, targets this copy.
                    assert chunk < num_chunks
                    c = chunk - remote_out_end
                    local_target = ((c // chunks) // (num_copies - 1)) // local_nodes
                    target = to_dist(local_target)
                    origin = other_start(c) + ((c // (num_copies - 1))) % local_nodes
                    # Check that the origin and target calculation match the local collective
                    assert subproblem_algo.collective.precondition(origin % local_nodes, chunk - num_remote_pairs)
                    assert subproblem_algo.collective.postcondition(local_target, chunk)
                # Get the chunk number in the distributed algorithm
                chunk_idx = chunk % chunks
                # Translate send src and dst to distributed space and add the send to the distributed algorithm
                dist_chunk = nth_chunk_for_pair(origin, target, chunk_idx)
                if dst == remote_node:
                    assert chunk < remote_out_end
                    # Sends to remote nodes have to find a matched receive
                    if dist_chunk in unmatched_recvs:
                        dist_dst = unmatched_recvs.pop(dist_chunk)
                        sends.append((dist_chunk, to_dist(src), dist_dst))
                    else:
                        unmatched_sends[dist_chunk] = to_dist(src)
                elif src == remote_node:
                    assert chunk < num_chunks
                    # Receives from remote nodes have to find a matched send
                    if dist_chunk in unmatched_sends:
                        dist_src = unmatched_sends.pop(dist_chunk)
                        sends.append((dist_chunk, dist_src, to_dist(dst)))
                    else:
                        unmatched_recvs[dist_chunk] = to_dist(dst)
                else:
                    # Sends locally are just translated to the new distributed space of ranks
                    sends.append((dist_chunk, to_dist(src), to_dist(dst)))
        if len(unmatched_sends) > 0 or len(unmatched_recvs) > 0:
            raise ValueError('Subproblem algorithm has unpaired sends/recvs.')
        steps.append(Step(local_step.rounds, sends))
    instance = Instance(
        steps=len(steps),
        extra_rounds=sum(step.rounds - 1 for step in steps),
        chunks=chunks,
    )
    return Algorithm.make_implementation(collective, topology, instance, steps)
| msccl/distributors/alltoall_subproblem.py | 10,140 | Copyright (c) Microsoft Corporation. Licensed under the MIT License. Create a distributed topology where copies of relay nodes that connect to the remote node in the subproblem topology are connected to all the relay nodes in the other copies. Also make copies of switches with a similar expansion of the remote node into the nodes of other copies. Non-remote nodes are just translated to the distributed numbering of ranks. Include all remote nodes in the switch. This is fine because the links already limit connectivity to just the relay nodes. The following chunk calculation respects both the _scattered and _transpose pre/postconditions in Alltoall. When substituting it in: -the precondition (chunk % self.num_nodes) simplifies to src -the postcondition ((chunk // self.num_nodes) % self.num_nodes) simplifies to dst Calculate the ranges of the differently handled chunks These are used to track operations involving remote nodes that get matched with another operation in the same step. Stitch together copies of the subproblem algorithm Translates ranks from the local to the distributed topology Given a relative remote chunk return local rank 0 in the copy it corresponds to Calculate origin and target ranks that match the Alltoall pre/postconditions Check that the origin and target calculation match the local collective Check that the origin and target calculation match the local collective Check that the origin and target calculation match the local collective Get the chunk number in the distributed algorithm Translate send src and dst to distributed space and add the send to the distributed algorithm Sends to remote nodes have to find a matched receive Receives from remote nodes have to find a matched send Sends locally are just translated to the new distributed space of ranks | 1,807 | en | 0.869265 |
import logging
from collections import OrderedDict
from pathlib import Path
from typing import List, Optional, Set, Tuple, Union
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import minmax_scale
from tqdm import tqdm
import flair
from flair.data import Dictionary, Sentence, Span, SpanLabel
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import (
TokenEmbeddings,
TransformerDocumentEmbeddings,
TransformerWordEmbeddings,
)
from flair.file_utils import cached_path
from flair.models.sequence_tagger_model import SequenceTagger
from flair.models.text_classification_model import TextClassifier
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class FewshotClassifier(flair.nn.Classifier[Sentence]):
    """Abstract base class for TARS few-shot models.

    Handles the multi-task bookkeeping (adding, switching and dropping tasks),
    negative-label sampling and zero-shot prediction shared by TARSTagger and
    TARSClassifier. Subclasses must implement ``tars_embeddings`` and
    ``_get_tars_formatted_sentence``.
    """

    def __init__(self):
        self._current_task = None
        self._task_specific_attributes = {}
        # maps each label to {other_label: similarity}; recomputed every epoch
        # and used to sample hard negatives
        self.label_nearest_map = None
        self.tars_model: flair.nn.Classifier[Sentence]
        super(FewshotClassifier, self).__init__()

    def forward_loss(
        self, data_points: Union[List[Sentence], Sentence]
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]:
        """Compute the loss of the internal TARS model on TARS-reformatted sentences."""
        if not isinstance(data_points, list):
            data_points = [data_points]
        # Transform input data into TARS format
        sentences = self._get_tars_formatted_sentences(data_points)
        loss = self.tars_model.forward_loss(sentences)
        return loss

    @property
    def tars_embeddings(self):
        """The embeddings of the internal TARS model; implemented by subclasses."""
        raise NotImplementedError

    def _get_tars_formatted_sentence(self, label, sentence):
        """Build a single <label, text> TARS sentence; implemented by subclasses."""
        raise NotImplementedError

    def _get_tars_formatted_sentences(self, sentences: List[Sentence]):
        """Create <label, text> pair sentences for the given sentences.

        During training with negative sampling enabled only the positive labels
        plus a sampled set of negatives are paired with each sentence; otherwise
        every label of the current task is paired with every sentence.
        """
        label_text_pairs = []
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        for sentence in sentences:
            label_text_pairs_for_sentence = []
            if self.training and self.num_negative_labels_to_sample is not None:
                # deduplicate while preserving order of appearance
                positive_labels = list(
                    OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)])
                )
                sampled_negative_labels = self._get_nearest_labels_for(positive_labels)
                for label in positive_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
                for label in sampled_negative_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
            else:
                for label in all_labels:
                    label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
            label_text_pairs.extend(label_text_pairs_for_sentence)
        return label_text_pairs

    def _get_nearest_labels_for(self, labels):
        """Sample negative labels for the given positive labels.

        Sampling is weighted by embedding similarity (see
        ``_compute_label_similarity_for_current_epoch``), so the most similar —
        i.e. hardest — negatives are chosen with higher probability.
        """
        # if there are no labels, return a random sample as negatives
        if len(labels) == 0:
            tags = self.get_current_label_dictionary().get_items()
            import random

            sample = random.sample(tags, k=self.num_negative_labels_to_sample)
            return sample
        already_sampled_negative_labels = set()
        # otherwise, go through all labels
        for label in labels:
            plausible_labels = []
            plausible_label_probabilities = []
            for plausible_label in self.label_nearest_map[label]:
                # skip labels that are positives or already sampled
                if plausible_label in already_sampled_negative_labels or plausible_label in labels:
                    continue
                plausible_labels.append(plausible_label)
                plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label])
            # make sure the probabilities always sum up to 1
            plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64")
            plausible_label_probabilities += 1e-08
            plausible_label_probabilities /= np.sum(plausible_label_probabilities)
            if len(plausible_labels) > 0:
                num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
                sampled_negative_labels = np.random.choice(
                    plausible_labels,
                    num_samples,
                    replace=False,
                    p=plausible_label_probabilities,
                )
                already_sampled_negative_labels.update(sampled_negative_labels)
        return already_sampled_negative_labels

    def train(self, mode=True):
        """Populate label similarity map based on cosine similarity before running epoch

        If the `num_negative_labels_to_sample` is set to an integer value then before starting
        each epoch the model would create a similarity measure between the label names based
        on cosine distances between their BERT encoded embeddings.
        """
        if mode and self.num_negative_labels_to_sample is not None:
            self._compute_label_similarity_for_current_epoch()
        # BUG FIX: the original called super().train(mode) twice; once suffices.
        super().train(mode)

    def _compute_label_similarity_for_current_epoch(self):
        """
        Compute the similarity between all labels for better sampling of negatives
        """
        # get and embed all labels by making a Sentence object that contains only the label text
        all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
        label_sentences = [Sentence(label) for label in all_labels]
        self.tars_embeddings.eval()  # TODO: check if this is necessary
        self.tars_embeddings.embed(label_sentences)
        self.tars_embeddings.train()
        # get each label embedding and scale between 0 and 1
        if isinstance(self.tars_embeddings, TokenEmbeddings):
            encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences]
        else:
            encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences]
        normalized_encoding = minmax_scale(encodings_np)
        # compute similarity matrix
        similarity_matrix = cosine_similarity(normalized_encoding)
        # the higher the similarity, the greater the chance that a label is
        # sampled as negative example
        negative_label_probabilities = {}
        for row_index, label in enumerate(all_labels):
            negative_label_probabilities[label] = {}
            for column_index, other_label in enumerate(all_labels):
                if label != other_label:
                    negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index]
        self.label_nearest_map = negative_label_probabilities

    def get_current_label_dictionary(self):
        """Return the label dictionary of the currently active task."""
        label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"]
        return label_dictionary

    def get_current_label_type(self):
        """Return the label type of the currently active task."""
        return self._task_specific_attributes[self._current_task]["label_type"]

    def is_current_task_multi_label(self):
        """Return whether the currently active task is multi-label."""
        return self._task_specific_attributes[self._current_task]["multi_label"]

    def add_and_switch_to_new_task(
        self,
        task_name,
        label_dictionary: Union[List, Set, Dictionary, str],
        label_type: str,
        multi_label: bool = True,
        force_switch: bool = False,
    ):
        """
        Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches'
        to the new task. Parameters are similar to the constructor except for model choice, batch
        size and negative sampling. This method does not store the resultant model onto disk.
        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of the labels you want to predict
        :param label_type: string to identify the label type ('ner', 'sentiment', etc.)
        :param multi_label: whether this task is a multi-label prediction problem
        :param force_switch: if True, will overwrite existing task with same name
        """
        if task_name in self._task_specific_attributes and not force_switch:
            log.warning("Task `%s` already exists in TARS model. Switching to it.", task_name)
        else:
            # make label dictionary if no Dictionary object is passed
            if isinstance(label_dictionary, Dictionary):
                label_dictionary = label_dictionary.get_items()
            if isinstance(label_dictionary, str):
                label_dictionary = [label_dictionary]
            # prepare dictionary of tags (without B- I- prefixes and without UNK)
            tag_dictionary = Dictionary(add_unk=False)
            for tag in label_dictionary:
                if tag == "<unk>" or tag == "O":
                    continue
                # strip BIO(ES) prefixes like "B-"/"I-". tag[1:2] (instead of the
                # original tag[1]) avoids an IndexError on single-character tags.
                if tag[1:2] == "-":
                    tag = tag[2:]
                    tag_dictionary.add_item(tag)
                else:
                    tag_dictionary.add_item(tag)
            self._task_specific_attributes[task_name] = {
                "label_dictionary": tag_dictionary,
                "label_type": label_type,
                "multi_label": multi_label,
            }
        self.switch_to_task(task_name)

    def list_existing_tasks(self) -> Set[str]:
        """
        Lists existing tasks in the loaded TARS model on the console.
        """
        return set(self._task_specific_attributes.keys())

    def switch_to_task(self, task_name):
        """
        Switches to a task which was previously added.
        """
        if task_name not in self._task_specific_attributes:
            log.error(
                "Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.",
                task_name,
            )
        else:
            self._current_task = task_name

    def _drop_task(self, task_name):
        """Remove a task from the model; refuses to drop the active task."""
        if task_name in self._task_specific_attributes:
            if self._current_task == task_name:
                log.error(
                    "`%s` is the current task." " Switch to some other task before dropping this.",
                    task_name,
                )
            else:
                self._task_specific_attributes.pop(task_name)
        else:
            log.warning("No task exists with the name `%s`.", task_name)

    @staticmethod
    def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
        """Drop sentences without tokens, warning about how many were removed."""
        filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
        if len(sentences) != len(filtered_sentences):
            log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
        return filtered_sentences

    @property
    def label_type(self):
        """Label type of the currently active task."""
        return self.get_current_label_type()

    def predict_zero_shot(
        self,
        sentences: Union[List[Sentence], Sentence],
        candidate_label_set: Union[List[str], Set[str], str],
        multi_label: bool = True,
    ):
        """
        Method to make zero shot predictions from the TARS model
        :param sentences: input sentence objects to classify
        :param candidate_label_set: set of candidate labels
        :param multi_label: indicates whether multi-label or single class prediction. Defaults to True.
        """
        # check if candidate_label_set is empty
        if candidate_label_set is None or len(candidate_label_set) == 0:
            log.warning("Provided candidate_label_set is empty")
            return
        # make list if only one candidate label is passed
        if isinstance(candidate_label_set, str):
            candidate_label_set = {candidate_label_set}
        # create label dictionary
        label_dictionary = Dictionary(add_unk=False)
        for label in candidate_label_set:
            label_dictionary.add_item(label)
        # note current task
        existing_current_task = self._current_task
        # create a temporary task
        self.add_and_switch_to_new_task(
            task_name="ZeroShot",
            label_dictionary=label_dictionary,
            label_type="-".join(label_dictionary.get_items()),
            multi_label=multi_label,
        )
        try:
            # make zero shot predictions
            self.predict(sentences)
        finally:
            # switch to the pre-existing task even if prediction fails
            self.switch_to_task(existing_current_task)
            self._drop_task("ZeroShot")
        return
class TARSTagger(FewshotClassifier):
    """
    TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class
    sequence labeler which given a <label, text> pair predicts the probability for each word
    to belong to one of the BIOES classes. The input data is a usual Sentence object which is inflated
    by the model internally before pushing it through the transformer stack of BERT.
    """

    # label type used internally for the binary "is this an entity" decision
    static_label_type = "tars_label"

    def __init__(
        self,
        task_name: Optional[str] = None,
        label_dictionary: Optional[Dictionary] = None,
        label_type: Optional[str] = None,
        embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased",
        num_negative_labels_to_sample: int = 2,
        prefix: bool = True,
        **tagger_args,
    ):
        """
        Initializes a TARSTagger
        :param task_name: a string depicting the name of the task
        :param label_dictionary: dictionary of labels you want to predict
        :param label_type: string to identify the label type ('ner', etc.)
        :param embeddings: name of the pre-trained transformer model e.g.,
            'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
            positive labels against a sentence during training. Defaults to 2 negative
            labels for each positive label. The model would sample all the negative labels
            if None is passed. That slows down the training considerably.
        :param prefix: if True the label text is prepended to the sentence, otherwise appended
        """
        super(TARSTagger, self).__init__()
        if isinstance(embeddings, str):
            embeddings = TransformerWordEmbeddings(
                model=embeddings,
                fine_tune=True,
                layers="-1",
                layer_mean=False,
            )
        # prepare TARS dictionary
        tars_dictionary = Dictionary(add_unk=False)
        tars_dictionary.add_item("entity")
        tars_dictionary.span_labels = True
        # initialize a bare-bones sequence tagger
        self.tars_model: SequenceTagger = SequenceTagger(
            hidden_size=123,
            embeddings=embeddings,
            tag_dictionary=tars_dictionary,
            tag_type=self.static_label_type,
            use_crf=False,
            use_rnn=False,
            reproject_embeddings=False,
            **tagger_args,
        )
        # transformer separator
        self.separator = str(self.tars_embeddings.tokenizer.sep_token)
        if self.tars_embeddings.tokenizer._bos_token:
            self.separator += str(self.tars_embeddings.tokenizer.bos_token)
        self.prefix = prefix
        self.num_negative_labels_to_sample = num_negative_labels_to_sample
        if task_name and label_dictionary and label_type:
            # Store task specific labels since TARS can handle multiple tasks
            self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
        else:
            log.info(
                "TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
                "before training this model"
            )

    def _get_tars_formatted_sentence(self, label, sentence):
        """Build the <label, text> sentence and copy over matching entity spans."""
        original_text = sentence.to_tokenized_string()
        label_text_pair = (
            f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
        )
        # token offset of the original text inside the combined sentence
        label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" "))
        # make a tars sentence where all labels are O by default
        tars_sentence = Sentence(label_text_pair, use_tokenizer=False)
        for entity_label in sentence.get_labels(self.label_type):
            if entity_label.value == label:
                new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span]
                tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity"))
        return tars_sentence

    def _get_state_dict(self):
        """Collect everything needed to serialize the model."""
        model_state = {
            "state_dict": self.state_dict(),
            "current_task": self._current_task,
            "tag_type": self.get_current_label_type(),
            "tag_dictionary": self.get_current_label_dictionary(),
            "tars_model": self.tars_model,
            "num_negative_labels_to_sample": self.num_negative_labels_to_sample,
            "prefix": self.prefix,
            "task_specific_attributes": self._task_specific_attributes,
        }
        return model_state

    @staticmethod
    def _fetch_model(model_name) -> str:
        """Resolve a known model alias to a locally cached model path."""
        if model_name == "tars-ner":
            cache_dir = Path("models")
            model_name = cached_path(
                "https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt",
                cache_dir=cache_dir,
            )
        return model_name

    @staticmethod
    def _init_model_with_state_dict(state):
        """Rebuild a TARSTagger from a serialized state dict."""
        # init new TARS classifier
        model = TARSTagger(
            task_name=state["current_task"],
            label_dictionary=state["tag_dictionary"],
            label_type=state["tag_type"],
            embeddings=state["tars_model"].embeddings,
            num_negative_labels_to_sample=state["num_negative_labels_to_sample"],
            prefix=state["prefix"],
        )
        # set all task information
        model._task_specific_attributes = state["task_specific_attributes"]
        # linear layers of internal classifier
        model.load_state_dict(state["state_dict"])
        return model

    @property
    def tars_embeddings(self):
        return self.tars_model.embeddings

    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size=32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
        most_probable_first: bool = True,
    ):
        """
        Predict sequence tags for Named Entity Recognition task
        :param sentences: a Sentence or a List of Sentence
        :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
        up to a point when it has no more effect.
        :param return_probabilities_for_all_classes: True to compute the score for each tag on each token,
        otherwise only the score of the best tag is returned
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
        'gpu' to store embeddings in GPU memory.
        :param most_probable_first: if True, assign spans greedily in decreasing order of
        confidence so that predictions for different labels cannot overlap
        """
        if label_name is None:
            label_name = self.get_current_label_type()
        if not sentences:
            return sentences
        if not isinstance(sentences, list):
            sentences = [sentences]
        reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
        dataloader = DataLoader(
            dataset=FlairDatapointDataset(reordered_sentences),
            batch_size=mini_batch_size,
        )
        # progress bar for verbosity
        if verbose:
            dataloader = tqdm(dataloader)
        overall_loss = 0
        overall_count = 0
        with torch.no_grad():
            for batch in dataloader:
                batch = self._filter_empty_sentences(batch)
                # stop if all sentences are empty
                if not batch:
                    continue
                # go through each sentence in the batch
                for sentence in batch:
                    # always remove tags first
                    sentence.remove_labels(label_name)
                    all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
                    all_detected = {}
                    # run the binary tagger once per candidate label
                    for label in all_labels:
                        tars_sentence = self._get_tars_formatted_sentence(label, sentence)
                        loss_and_count = self.tars_model.predict(
                            tars_sentence,
                            label_name=label_name,
                            return_loss=True,
                        )
                        overall_loss += loss_and_count[0].item()
                        overall_count += loss_and_count[1]
                        for predicted in tars_sentence.get_labels(label_name):
                            predicted.value = label
                            all_detected[predicted] = predicted.score
                    if most_probable_first:
                        import operator

                        already_set_indices: List[int] = []
                        sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1))
                        sorted_x.reverse()
                        # IDIOM FIX: renamed the loop variable (originally `tuple`,
                        # which shadowed the builtin)
                        for detected in sorted_x:
                            # get the span and its label
                            label = detected[0]
                            label_length = (
                                0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" "))
                            )
                            # determine whether tokens in this span already have a label
                            tag_this = True
                            for token in label.span:
                                corresponding_token = sentence.get_token(token.idx - label_length)
                                if corresponding_token is None:
                                    tag_this = False
                                    continue
                                if token.idx in already_set_indices:
                                    tag_this = False
                                    continue
                            # only add if all tokens have no label
                            if tag_this:
                                already_set_indices.extend(token.idx for token in label.span)
                                predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span]
                                sentence.add_complex_label(
                                    label_name,
                                    label=SpanLabel(Span(predicted_span), value=label.value, score=label.score),
                                )
                # clearing token embeddings to save memory
                store_embeddings(batch, storage_mode=embedding_storage_mode)
        if return_loss:
            return overall_loss, overall_count
class TARSClassifier(FewshotClassifier):
"""
TARS model for text classification. In the backend, the model uses a BERT based binary
text classifier which given a <label, text> pair predicts the probability of two classes
"True", and "False". The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
LABEL_MATCH = "YES"
LABEL_NO_MATCH = "NO"
def __init__(
self,
task_name: Optional[str] = None,
label_dictionary: Optional[Dictionary] = None,
label_type: Optional[str] = None,
embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased",
num_negative_labels_to_sample: int = 2,
prefix: bool = True,
**tagger_args,
):
"""
Initializes a TextClassifier
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
:param num_negative_labels_to_sample: number of negative labels to sample for each
positive labels against a sentence during training. Defaults to 2 negative
labels for each positive label. The model would sample all the negative labels
if None is passed. That slows down the training considerably.
:param multi_label: auto-detected by default, but you can set this to True
to force multi-label predictionor False to force single-label prediction
:param multi_label_threshold: If multi-label you can set the threshold to make predictions
:param beta: Parameter for F-beta score for evaluation and training annealing
"""
super(TARSClassifier, self).__init__()
if isinstance(embeddings, str):
embeddings = TransformerDocumentEmbeddings(
model=embeddings,
fine_tune=True,
layers="-1",
layer_mean=False,
)
# prepare TARS dictionary
tars_dictionary = Dictionary(add_unk=False)
tars_dictionary.add_item(self.LABEL_NO_MATCH)
tars_dictionary.add_item(self.LABEL_MATCH)
# initialize a bare-bones sequence tagger
self.tars_model = TextClassifier(
document_embeddings=embeddings,
label_dictionary=tars_dictionary,
label_type=self.static_label_type,
**tagger_args,
)
# transformer separator
self.separator = str(self.tars_embeddings.tokenizer.sep_token)
if self.tars_embeddings.tokenizer._bos_token:
self.separator += str(self.tars_embeddings.tokenizer.bos_token)
self.prefix = prefix
self.num_negative_labels_to_sample = num_negative_labels_to_sample
if task_name and label_dictionary and label_type:
# Store task specific labels since TARS can handle multiple tasks
self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
else:
log.info(
"TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
"before training this model"
)
self.clean_up_labels = True
def _clean(self, label_value: str) -> str:
if self.clean_up_labels:
return label_value.replace("_", " ")
else:
return label_value
def _get_tars_formatted_sentence(self, label, sentence):
label = self._clean(label)
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())]
tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH
tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label)
return tars_sentence
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"current_task": self._current_task,
"label_type": self.get_current_label_type(),
"label_dictionary": self.get_current_label_dictionary(),
"tars_model": self.tars_model,
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"task_specific_attributes": self._task_specific_attributes,
}
return model_state
@staticmethod
def _init_model_with_state_dict(state):
# init new TARS classifier
label_dictionary = state["label_dictionary"]
label_type = "default_label" if not state["label_type"] else state["label_type"]
model: TARSClassifier = TARSClassifier(
task_name=state["current_task"],
label_dictionary=label_dictionary,
label_type=label_type,
embeddings=state["tars_model"].document_embeddings,
num_negative_labels_to_sample=state["num_negative_labels_to_sample"],
)
# set all task information
model._task_specific_attributes = state["task_specific_attributes"]
# linear layers of internal classifier
model.load_state_dict(state["state_dict"])
return model
@staticmethod
def _fetch_model(model_name) -> str:
    """Resolve a known model alias to a locally cached file; other names pass through unchanged."""
    hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
    model_map = {"tars-base": "/".join([hu_path, "tars-base", "tars-base-v8.pt"])}
    if model_name in model_map:
        model_name = cached_path(model_map[model_name], cache_dir=Path("models"))
    return model_name
@property
def tars_embeddings(self):
    """Document embeddings of the wrapped internal TARS model."""
    embeddings = self.tars_model.document_embeddings
    return embeddings
def predict(
    self,
    sentences: Union[List[Sentence], Sentence],
    mini_batch_size=32,
    return_probabilities_for_all_classes: bool = False,
    verbose: bool = False,
    label_name: Optional[str] = None,
    return_loss=False,
    embedding_storage_mode="none",
    label_threshold: float = 0.5,
    multi_label: Optional[bool] = None,
):
    """
    Predict text-classification labels for the given sentences with TARS.

    Every candidate label of the current task is paired with each sentence and
    scored by the internal binary TARS model; labels whose MATCH score exceeds
    ``label_threshold`` are attached to the sentence. When ``multi_label`` is
    False (or the current task is single-label), only the highest-scoring label
    is kept.

    :param sentences: a Sentence or a List of Sentence
    :param mini_batch_size: size of the minibatch; usually bigger is faster but
        consumes more memory, up to a point where it has no more effect.
    :param return_probabilities_for_all_classes: currently unused here; the
        internal model is queried for all-class probabilities whenever
        label_threshold < 0.5.
    :param verbose: set to True to display a progress bar
    :param label_name: set this to change the name of the label type that is predicted
    :param return_loss: set to True to return (overall_loss, overall_count)
    :param embedding_storage_mode: default is 'none' which is always best. Only set to
        'cpu' or 'gpu' if you wish to not only predict, but also keep the generated
        embeddings in CPU or GPU memory respectively.
    :param label_threshold: minimum MATCH confidence for a label to be attached
    :param multi_label: None means "use the current task's setting"
    """
    # resolve defaults from the currently active task
    if label_name is None:
        label_name = self.get_current_label_type()
    if multi_label is None:
        multi_label = self.is_current_task_multi_label()
    # with torch.no_grad():
    if not sentences:
        return sentences
    if isinstance(sentences, Sentence):
        sentences = [sentences]
    # set context if not set already: chain sentences in their given order
    previous_sentence = None
    for sentence in sentences:
        if sentence.is_context_set():
            continue
        sentence._previous_sentence = previous_sentence
        sentence._next_sentence = None
        if previous_sentence:
            previous_sentence._next_sentence = sentence
        previous_sentence = sentence
    # batch sentences of similar length together (longest first)
    reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
    dataloader = DataLoader(
        dataset=FlairDatapointDataset(reordered_sentences),
        batch_size=mini_batch_size,
    )
    # progress bar for verbosity
    if verbose:
        progressbar = tqdm(dataloader)
        progressbar.set_description("Batch inference")
        dataloader = progressbar
    overall_loss = 0
    overall_count = 0
    batch_no = 0
    with torch.no_grad():
        for batch in dataloader:
            batch_no += 1
            batch = self._filter_empty_sentences(batch)
            # stop if all sentences are empty
            if not batch:
                continue
            # go through each sentence in the batch
            for sentence in batch:
                # always remove tags first
                sentence.remove_labels(label_name)
                # all candidate labels of the current task (stored as bytes in the dictionary)
                all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
                best_label = None
                # score every candidate label against this sentence
                for label in all_labels:
                    tars_sentence = self._get_tars_formatted_sentence(label, sentence)
                    loss_and_count = self.tars_model.predict(
                        tars_sentence,
                        label_name=label_name,
                        return_loss=True,
                        return_probabilities_for_all_classes=True if label_threshold < 0.5 else False,
                    )
                    overall_loss += loss_and_count[0].item()
                    overall_count += loss_and_count[1]
                    # add all labels that according to TARS match the text and are above threshold
                    for predicted_tars_label in tars_sentence.get_labels(label_name):
                        if (
                            predicted_tars_label.value == self.LABEL_MATCH
                            and predicted_tars_label.score > label_threshold
                        ):
                            # do not add labels below confidence threshold
                            sentence.add_label(label_name, label, predicted_tars_label.score)
                # only use label with highest confidence if enforcing single-label predictions
                if not multi_label:
                    if len(sentence.get_labels(label_name)) > 0:
                        # get all label scores and do an argmax to get the best label
                        label_scores = torch.tensor(
                            [label.score for label in sentence.get_labels(label_name)],
                            dtype=torch.float,
                        )
                        best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)]
                        # remove previously added labels and only add the best label
                        sentence.remove_labels(label_name)
                        sentence.add_label(
                            typename=label_name,
                            value=best_label.value,
                            score=best_label.score,
                        )
            # clearing token embeddings to save memory
            store_embeddings(batch, storage_mode=embedding_storage_mode)
    if return_loss:
        return overall_loss, overall_count
| flair/models/tars_model.py | 35,323 | TARS model for text classification. In the backend, the model uses a BERT based binary
text classifier which given a <label, text> pair predicts the probability of two classes
"True", and "False". The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class
sequence labeler which given a <label, text> pair predicts the probability for each word
to belong to one of the BIOES classes. The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
Initializes a TextClassifier
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
:param num_negative_labels_to_sample: number of negative labels to sample for each
positive labels against a sentence during training. Defaults to 2 negative
labels for each positive label. The model would sample all the negative labels
if None is passed. That slows down the training considerably.
Initializes a TextClassifier
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
:param num_negative_labels_to_sample: number of negative labels to sample for each
positive labels against a sentence during training. Defaults to 2 negative
labels for each positive label. The model would sample all the negative labels
if None is passed. That slows down the training considerably.
:param multi_label: auto-detected by default, but you can set this to True
to force multi-label prediction or False to force single-label prediction
:param multi_label_threshold: If multi-label you can set the threshold to make predictions
:param beta: Parameter for F-beta score for evaluation and training annealing
Compute the similarity between all labels for better sampling of negatives
Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches'
to the new task. Parameters are similar to the constructor except for model choice, batch
size and negative sampling. This method does not store the resultant model onto disk.
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of the labels you want to predict
:param label_type: string to identify the label type ('ner', 'sentiment', etc.)
:param multi_label: whether this task is a multi-label prediction problem
:param force_switch: if True, will overwrite existing task with same name
Lists existing tasks in the loaded TARS model on the console.
Predict sequence tags for Named Entity Recognition task
:param sentences: a Sentence or a List of Sentence
:param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
up to a point when it has no more effect.
:param all_tag_prob: True to compute the score for each tag on each token,
otherwise only the score of the best tag is returned
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
Predict sequence tags for Named Entity Recognition task
:param sentences: a Sentence or a List of Sentence
:param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
up to a point when it has no more effect.
:param all_tag_prob: True to compute the score for each tag on each token,
otherwise only the score of the best tag is returned
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
Method to make zero shot predictions from the TARS model
:param sentences: input sentence objects to classify
:param candidate_label_set: set of candidate labels
:param multi_label: indicates whether multi-label or single class prediction. Defaults to True.
Switches to a task which was previously added.
Populate label similarity map based on cosine similarity before running epoch
If the `num_negative_labels_to_sample` is set to an integer value then before starting
each epoch the model would create a similarity measure between the label names based
on cosine distances between their BERT encoded embeddings.
Transform input data into TARS format if there are no labels, return a random sample as negatives otherwise, go through all labels make sure the probabilities always sum up to 1 get and embed all labels by making a Sentence object that contains only the label text TODO: check if this is necessary get each label embedding and scale between 0 and 1 compute similarity matrix the higher the similarity, the greater the chance that a label is sampled as negative example make label dictionary if no Dictionary object is passed prepare dictionary of tags (without B- I- prefixes and without UNK) check if candidate_label_set is empty make list if only one candidate label is passed create label dictionary note current task create a temporary task make zero shot predictions switch to the pre-existing task prepare TARS dictionary initialize a bare-bones sequence tagger transformer separator Store task specific labels since TARS can handle multiple tasks make a tars sentence where all labels are O by default init new TARS classifier set all task information linear layers of internal classifier return with torch.no_grad(): progress bar for verbosity stop if all sentences are empty go through each sentence in the batch always remove tags first get the span and its label label = span.get_labels("tars_temp_label")[0].value determine whether tokens in this span already have a label only add if all tokens have no label clearing token embeddings to save memory prepare TARS dictionary initialize a bare-bones sequence tagger transformer separator Store task specific labels since TARS can handle multiple tasks init new TARS classifier set all task information linear layers of internal classifier with torch.no_grad(): set context if not set already progress bar for verbosity stop if all sentences are empty go through each sentence in the batch always remove tags first add all labels that according to TARS match the text and are above threshold do not add labels below confidence threshold 
only use label with highest confidence if enforcing single-label predictions get all label scores and do an argmax to get the best label remove previously added labels and only add the best label clearing token embeddings to save memory | 7,322 | en | 0.813444 |
# -*- coding: utf-8 -*-
"""Console script for python_learn."""
import sys
import click
@click.command()
def main(args=None):
    """Console script for python_learn."""
    placeholder = ("Replace this message by putting your code into "
                   "python_learn.cli.main")
    click.echo(placeholder)
    click.echo("See click documentation at http://click.pocoo.org/")
    return 0
# Script entry point: exit with main()'s return code (0 on success).
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| python_learn/cli.py | 428 | Console script for python_learn.
Console script for python_learn.
-*- coding: utf-8 -*- pragma: no cover | 106 | en | 0.696841 |
# -------------------------------------------------------------------------
# Copyright (C) 2018 BMW Car IT GmbH
# -------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# -------------------------------------------------------------------------
"""
Contains configurations constants
"""
# Running total of warnings found by the checker (presumably mutated by the
# checking scripts — TODO confirm against callers).
G_WARNING_COUNT = 0
# Paths (relative to the repository root) treated as proprietary; presumably
# these get the proprietary header template below instead of the open one.
G_PROP_FILES = [
    'proprietary',
    'scripts/integration_tests/proprietary',
    'scripts/integration_tests/run_smoke_tests_as_bat_tests.py',
    'zuul.d'
]
# Expected license header for open-source files; [YYYY] is a year placeholder.
G_LICENSE_TEMPLATE_OPEN = """
-------------------------------------------------------------------------
Copyright (C) [YYYY] BMW XXXX
-------------------------------------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
-------------------------------------------------------------------------
"""
# Expected license header for proprietary files; [YYYY] is a year placeholder.
G_LICENSE_TEMPLATE_PROP = """
-------------------------------------------------------------------------
Copyright (C) [YYYY] BMW XXXX
All rights reserved.
-------------------------------------------------------------------------
This document contains proprietary information belonging to BMW XXXX.
Passing on and copying of this document, use and communication of its
contents is not permitted without prior written authorization.
-------------------------------------------------------------------------
"""
| scripts/code_style_checker/common_modules/config.py | 1,721 | Contains configurations constants
------------------------------------------------------------------------- Copyright (C) 2018 BMW Car IT GmbH ------------------------------------------------------------------------- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. ------------------------------------------------------------------------- | 493 | en | 0.639477 |
import time
import random
import highfive
# This is the remote worker for the sum example. Here, we define what the
# workers do when they get a call from the master. All we need is a single
# function which takes the call, does some processing, and returns a response.
# An interesting way to play with the workers is to spin some up, then shut
# them down before the job set running on the master is complete. The jobs
# which the workers are running will be requeued on the master so that when
# more workers connect, the jobs will be tried again. This makes network
# problems no big deal as long as you reconnect the workers at some point.
# In our case, we take in a pair of numbers and return their sum. To make
# it easier to watch the progress of the job set in real time, we sleep for
# anywhere between 0 and 1/4 seconds before the sum to simulate heavy
# processing.
def delayed_sum(numbers):
    """Return the sum of *numbers* after a random pause of at most 0.25 s.

    The pause simulates heavy processing so job-set progress is observable.
    """
    pause = random.random() / 4
    time.sleep(pause)
    return sum(numbers)
# Now we can easily start a worker pool to connect to a local HighFive master.
# We can also add a `host=<host name>` and `port=<port number>` to connect to a
# remote HighFive master. By default, `run_worker_pool()` creates a worker
# process for each available CPU core to maximize CPU utilization, but we can
# we can limit this with `max_workers=<number of workers>`.
# Start a local worker pool that serves delayed_sum calls; Ctrl-C stops it
# cleanly instead of dumping a traceback.
if __name__ == "__main__":
    try:
        highfive.run_worker_pool(delayed_sum)
    except KeyboardInterrupt:
        print("keyboard interrupt")
| examples/sum_worker.py | 1,495 | This is the remote worker for the sum example. Here, we define what the workers do when they get a call from the master. All we need is a single function which takes the call, does some processing, and returns a response. An interesting way to play with the workers is to spin some up, then shut them down before the job set running on the master is complete. The jobs which the workers are running will be requeued on the master so that when more workers connect, the jobs will be tried again. This makes network problems no big deal as long as you reconnect the workers at some point. In our case, we take in a pair of numbers and return their sum. To make it easier to watch the progress of the job set in real time, we sleep for anywhere between 0 and 1/4 seconds before the sum to simulate heavy processing. Now we can easily start a worker pool to connect to a local HighFive master. We can also add a `host=<host name>` and `port=<port number>` to connect to a remote HighFive master. By default, `run_worker_pool()` creates a worker process for each available CPU core to maximize CPU utilization, but we can we can limit this with `max_workers=<number of workers>`. | 1,174 | en | 0.948911 |
import datetime
import inspect
import os
import pprint as pretty_print
import jinja2
from jinja2 import Environment, FileSystemLoader
DEFAULT_TEMPLATE_FOLDERS = ["templates"]
def get_log_errors(logs):
    """Return the entries of *logs* at ERROR level (40) or above."""
    errors = []
    for entry in logs.list():
        if entry["level"] >= 40:
            errors.append(entry)
    return errors
def make_list(obj):
    """Materialize any iterable into a plain list (template ``list`` filter)."""
    return [item for item in obj]
def pprint(obj):
    """Pretty-format *obj* as a string (template ``pprint`` filter)."""
    formatted = pretty_print.pformat(obj)
    return formatted
def format_time(time):
    """Render a duration as ``H:MM`` followed by ``h``.

    Accepts either a datetime.timedelta or a number, which is interpreted as
    milliseconds (truncated to whole seconds).
    """
    if isinstance(time, datetime.timedelta):
        delta = time
    else:
        delta = datetime.timedelta(seconds=int(time / 1000.0))
    hours_and_minutes = str(delta).split(":")[:2]
    return ":".join(hours_and_minutes) + "h"
# Custom Jinja2 filters installed into every environment, keyed by the name
# templates use to invoke them (e.g. {{ logs | get_log_errors }}).
FILTERS = {
    "pprint": pprint,
    "list": make_list,
    "get_log_errors": get_log_errors,
    "format_time": format_time,
}
# Objects exposed as global names inside every template.
GLOBALS = {"datetime": datetime, "str": str}
def get_environment(paths):
    """Build a Jinja2 Environment that loads templates from *paths*, with the
    module's custom filters and globals installed."""
    environment = Environment(
        loader=FileSystemLoader(paths),
        lstrip_blocks=True,
        trim_blocks=True,
    )
    environment.filters.update(FILTERS)
    environment.globals.update(GLOBALS)
    return environment
class TemplateLoader:
    """Maintains a growing list of template folders and a Jinja2 environment
    built from them; the environment is rebuilt whenever a path is added."""
    def __init__(self):
        self.paths = []  # registered template folders, in insertion order
        self.reload_env()
    def add_path(self, path):
        """Register *path* if it is new and an existing directory, then rebuild the env."""
        if path not in self.paths and os.path.isdir(path):
            self.paths.append(path)
            self.reload_env()
    def auto_discover(self, path=None, folder=None):
        """Register template folders relative to the *caller's* source file.

        NOTE: relies on inspect.stack()[1], so it must be invoked directly by
        the module that owns the templates, not through a wrapper function.
        """
        caller_folder = os.path.dirname(inspect.stack()[1][1])
        if path:
            caller_folder = os.path.join(caller_folder, path)
        if folder:
            self.add_path(os.path.join(caller_folder, folder))
        else:
            self.discover_folder(caller_folder)
    def discover_folder(self, candidate_folder):
        """Register each default template subfolder (e.g. 'templates') under *candidate_folder*."""
        for folder in [
            os.path.join(candidate_folder, dir) for dir in DEFAULT_TEMPLATE_FOLDERS
        ]:
            self.add_path(folder)
    def reload_env(self):
        """Rebuild the Jinja2 environment from the currently registered paths."""
        self.env = get_environment(self.paths)
    def get_template(self, name):
        """Load a template by absolute path, or by name from the registered paths."""
        if os.path.isabs(name):  # If provided an absolute path to a template
            # build a one-off environment rooted at the template's own folder
            environment = get_environment(os.path.dirname(name))
            template = environment.get_template(os.path.basename(name))
        else:
            template = self.env.get_template(name)
        return template
# Module-level singleton shared by all importers of this module.
template_loader = TemplateLoader()
| spidermon/templates.py | 2,354 | If provided an absolute path to a template | 42 | en | 0.388684 |
# -*- coding: utf-8 -*-
#
# github-cli documentation build configuration file, created by
# sphinx-quickstart on Tue May 5 17:40:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# (Standard sphinx-quickstart layout; commented-out assignments show defaults.)
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'github-cli'
copyright = u'2009-2012, Sander Smits'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'github-clidoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'github-cli.tex', u'github-cli Documentation',
     u'Sander Smits', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| docs/source/conf.py | 6,307 | -*- coding: utf-8 -*- github-cli documentation build configuration file, created by sphinx-quickstart on Tue May 5 17:40:34 2009. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.sys.path.append(os.path.abspath('.')) -- General configuration ----------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix of source filenames. The encoding of source files.source_encoding = 'utf-8' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages.language = None There are two options for replacing |today|: either, you set today to some non-false value, then it is used:today = '' Else, today_fmt is used as the format for a strftime call.today_fmt = '%B %d, %Y' List of documents that shouldn't be included in the build.unused_docs = [] List of directories, relative to source directory, that shouldn't be searched for source files. 
The reST default role (used for this markup: `text`) to use for all documents.default_role = None If true, '()' will be appended to :func: etc. cross-reference text.add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::).add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting.modindex_common_prefix = [] -- Options for HTML output --------------------------------------------------- The theme to use for HTML and HTML Help pages. Major themes that come with Sphinx are currently 'default' and 'sphinxdoc'. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation.html_theme_options = {} Add any paths that contain custom themes here, relative to this directory.html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation".html_title = None A shorter title for the navigation bar. Default is the same as html_title.html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar.html_logo = None The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large.html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". 
If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.html_last_updated_fmt = '%b %d, %Y' If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities.html_use_smartypants = True Custom sidebar templates, maps document names to template names.html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names.html_additional_pages = {} If false, no module index is generated.html_use_modindex = True If false, no index is generated.html_use_index = True If true, the index is split into individual pages for each letter.html_split_index = False If true, links to the reST sources are added to the pages.html_show_sourcelink = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served.html_use_opensearch = '' If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").html_file_suffix = '' Output file base name for HTML help builder. -- Options for LaTeX output -------------------------------------------------- The paper size ('letter' or 'a4').latex_paper_size = 'letter' The font size ('10pt', '11pt' or '12pt').latex_font_size = '10pt' Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto/manual]). The name of an image file (relative to this directory) to place at the top of the title page.latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters.latex_use_parts = False Additional stuff for the LaTeX preamble.latex_preamble = '' Documents to append as an appendix to all manuals.latex_appendices = [] If false, no module index is generated.latex_use_modindex = True | 5,552 | en | 0.712869 |
from __future__ import print_function
import argparse
import os
import pickle
import sys
import cv2
import numpy as np
import torch
import vlfeat # calls constructor
from sklearn.cluster import MiniBatchKMeans
from src.utils.cluster.eval_metrics import _hungarian_match, _original_match, \
_acc
from src.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
# Dimensionality of a SIFT descriptor vector.
SIFT_DLEN = 128
# Stride in pixels between dense-SIFT sampling locations.
SIFT_STEP = 10
def _get_vectorised_sift_samples(archetype_config, dataloader):
    """Extract dense SIFT descriptors for every image in the dataloader.

    Returns a (num unmasked box-central pixels x SIFT_DLEN) uint8 array of
    samples; when the dataloader yields (imgs, labels, masks) triples (test
    dataloaders) also returns the matching flat label array.
    Operates on greyscale conversions of the input images.
    """
    num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
    num_imgs_max = num_batches * batch_sz  # upper-bound estimate
    img_sz = archetype_config.input_sz

    # cluster individual (box central) pixels
    desc_side = int(img_sz / SIFT_STEP)
    print("img sz %d, desc_side %d" % (img_sz, desc_side))
    sys.stdout.flush()

    descs_all = np.zeros((num_imgs_max, desc_side * desc_side,
                          SIFT_DLEN), dtype=np.uint8)
    # np.bool was removed in NumPy 1.24; the builtin bool is the equivalent dtype
    masks_all = np.zeros((num_imgs_max, desc_side * desc_side), dtype=bool)
    labels_all = None
    store_labels = False  # guard: stays defined even if the dataloader is empty
    actual_num_imgs = 0

    # when descriptor matrix flattened, goes along rows first (rows change slow)
    # NB: integer division keeps the index arrays integer-typed under Python 3
    # (true division would produce float arrays, which raise on fancy indexing)
    half_step = SIFT_STEP // 2
    central_inds_h = (np.arange(desc_side) * SIFT_STEP +
                      half_step).reshape((desc_side, 1)).repeat(desc_side,
                                                                axis=1)
    central_inds_w = (np.arange(desc_side) * SIFT_STEP +
                      half_step).reshape((1, desc_side)).repeat(desc_side,
                                                                axis=0)
    central_inds_h, central_inds_w = central_inds_h.reshape(-1), \
        central_inds_w.reshape(-1)

    for b_i, batch in enumerate(dataloader):
        if len(batch) == 3:  # test dataloader: (imgs, labels, masks)
            store_labels = True
            if (labels_all is None):
                labels_all = np.zeros((num_imgs_max, desc_side * desc_side),
                                      dtype=np.int32)
            imgs, labels, masks = batch
            labels = labels.cpu().numpy().astype(np.int32)
        else:  # training dataloader
            store_labels = False
            imgs, _, _, masks = batch

        # imgs arrive channel first in [0-1] float range; convert to HWC uint8
        imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
        masks = masks.cpu().numpy().astype(bool)
        curr_batch_sz, h, w, c = imgs.shape
        assert (h == archetype_config.input_sz and w == archetype_config.input_sz
                and c == archetype_config.in_channels)
        if b_i < num_batches - 1:
            assert (batch_sz == curr_batch_sz)

        start = b_i * batch_sz
        for i in range(curr_batch_sz):
            grey_img = cv2.cvtColor(imgs[i, :, :, :], cv2.COLOR_RGB2GRAY)
            locs, descs = vlfeat.vl_dsift(grey_img, step=SIFT_STEP)
            descs = descs.transpose((1, 0))  # (desc_side * desc_side, SIFT_DLEN)
            descs = descs.reshape(-1, SIFT_DLEN)  # rows change slowest

            # get the corresponding box central mask/label
            mask = masks[i][central_inds_h, central_inds_w]

            offset = start + i
            descs_all[offset, :, :] = descs
            masks_all[offset, :] = mask
            if store_labels:
                label = labels[i][central_inds_h, central_inds_w]
                labels_all[offset, :] = label

        actual_num_imgs += curr_batch_sz

    descs_all = descs_all[:actual_num_imgs, :, :]
    masks_all = masks_all[:actual_num_imgs, :]
    num_unmasked = masks_all.sum()
    if store_labels:
        labels_all = labels_all[:actual_num_imgs, :]
        samples_labels = labels_all[masks_all].reshape(-1)
        assert (samples_labels.shape[0] == num_unmasked)

    samples = descs_all[masks_all, :].reshape(-1, SIFT_DLEN)
    assert (samples.shape[0] == num_unmasked)

    if not store_labels:
        return samples
    else:
        return samples, samples_labels
def _get_vectorised_colour_samples(archetype_config, dataloader):
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual pixels
imgs_all = np.zeros(
(num_imgs_max, img_sz, img_sz, archetype_config.in_channels),
dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
for b_i, batch in enumerate(dataloader):
if len(batch) == 3:
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else:
store_labels = False
imgs, _, _, masks = batch
# channels last
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
imgs_all[start:(start + curr_batch_sz), :, :, :] = imgs
masks_all[start:(start + curr_batch_sz), :, :] = masks
if store_labels:
labels_all[start:(start + curr_batch_sz), :, :] = labels
actual_num_imgs += curr_batch_sz
imgs_all = imgs_all[:actual_num_imgs, :, :, :]
masks_all = masks_all[:actual_num_imgs, :, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = imgs_all[masks_all, :].reshape(-1, archetype_config.in_channels)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def main():
    """k-means baseline over pixel colours (or dense SIFT descriptors).

    Based on segmentation_multioutput_twohead - we pass in the config of the
    IID run we are comparing against, so the settings can be copied, and we
    evaluate with the same assign/match protocol.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ind", type=int, required=True)
    parser.add_argument("--out_root", type=str,
                        default="/scratch/shared/slow/xuji/iid_private")
    parser.add_argument("--IID_model_ind", type=int, required=True)
    parser.add_argument("--max_num_train", type=int, required=True)
    parser.add_argument("--test_code", default=False, action="store_true")
    parser.add_argument("--do_sift", default=False, action="store_true")
    config = parser.parse_args()

    config.out_dir = os.path.join(config.out_root, str(config.model_ind))
    if not os.path.exists(config.out_dir):
        os.makedirs(config.out_dir)

    archetype_config_path = os.path.join(config.out_root,
                                         str(config.IID_model_ind),
                                         "config.pickle")
    print("Loading archetype config from: %s" % archetype_config_path)
    with open(archetype_config_path, "rb") as config_f:
        archetype_config = pickle.load(config_f)
    assert (config.IID_model_ind == archetype_config.model_ind)
    assert (archetype_config.mode == "IID")  # compare against fully unsup

    sample_fn = _get_vectorised_colour_samples
    if config.do_sift:
        sample_fn = _get_vectorised_sift_samples

    # set it to be only rgb (and ir if nec) but no sobel - we're clustering
    # single pixel colours
    archetype_config.include_rgb = True
    archetype_config.no_sobel = True
    if "Coco" in archetype_config.dataset:
        assert (not archetype_config.using_IR)
        archetype_config.in_channels = 3
    elif archetype_config.dataset == "Potsdam":  # IR
        assert (archetype_config.using_IR)
        archetype_config.in_channels = 4

    # Data
    # -------------------------------------------------------------------------
    if "Coco" in archetype_config.dataset:
        dataloaders_head_A, mapping_assignment_dataloader, \
            mapping_test_dataloader = \
            make_Coco_dataloaders(archetype_config)
    elif archetype_config.dataset == "Potsdam":
        dataloaders_head_A, mapping_assignment_dataloader, \
            mapping_test_dataloader = \
            make_Potsdam_dataloaders(archetype_config)
    else:
        raise NotImplementedError

    # unlike in clustering script for STL - isn't any data from unknown classes
    dataloaders_head_B = dataloaders_head_A

    # networks and optimisers
    # ------------------------------------------------------
    assert (archetype_config.num_dataloaders == 1)
    dataloader = dataloaders_head_B[0]

    samples = sample_fn(archetype_config, dataloader)
    print("got training samples")
    sys.stdout.flush()

    if config.test_code:
        print("testing code, taking 10000 samples only")
        samples = samples[:10000, :]
    else:
        num_samples_train = min(samples.shape[0], config.max_num_train)
        print("taking %d samples" % num_samples_train)
        chosen_inds = np.random.choice(samples.shape[0], size=num_samples_train,
                                       replace=False)
        samples = samples[chosen_inds, :]
    print(samples.shape)
    sys.stdout.flush()

    kmeans = MiniBatchKMeans(n_clusters=archetype_config.gt_k, verbose=1).fit(
        samples)
    print("trained kmeans")
    sys.stdout.flush()

    # use mapping assign to assign output_k=gt_k to gt_k
    # and also assess on its predictions, since it's identical to
    # mapping_test_dataloader
    assign_samples, assign_labels = sample_fn(archetype_config,
                                              mapping_assignment_dataloader)
    num_samples = assign_samples.shape[0]
    assign_preds = kmeans.predict(assign_samples)
    print("finished prediction for mapping assign/test data")
    sys.stdout.flush()

    assign_preds = torch.from_numpy(assign_preds).cuda()
    assign_labels = torch.from_numpy(assign_labels).cuda()

    if archetype_config.eval_mode == "hung":
        match = _hungarian_match(assign_preds, assign_labels,
                                 preds_k=archetype_config.gt_k,
                                 targets_k=archetype_config.gt_k)
    elif archetype_config.eval_mode == "orig":  # flat!
        match = _original_match(assign_preds, assign_labels,
                                preds_k=archetype_config.gt_k,
                                targets_k=archetype_config.gt_k)
    elif archetype_config.eval_mode == "orig_soft":
        assert (False)  # not used

    # reorder predictions to be same cluster assignments as gt_k
    found = torch.zeros(archetype_config.gt_k)
    reordered_preds = torch.zeros(num_samples).to(torch.int32).cuda()
    for pred_i, target_i in match:
        reordered_preds[assign_preds == pred_i] = target_i
        found[pred_i] = 1
    assert (found.sum() == archetype_config.gt_k)  # each output_k must get mapped

    acc = _acc(reordered_preds, assign_labels, archetype_config.gt_k)
    print("got acc %f" % acc)

    config.epoch_acc = [acc]
    config.centroids = kmeans.cluster_centers_
    config.match = match

    # write results and centroids to model_ind output file
    # NB: pickle writes bytes, so the file must be opened in binary mode under
    # Python 3 (the previous "w" text mode raised TypeError here)
    with open(os.path.join(config.out_dir, "config.pickle"), "wb") as outfile:
        pickle.dump(config, outfile)
    with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
        text_file.write("%s" % config)
| src/scripts/segmentation/baselines/kmeans_and_sift.py | 11,132 | calls constructor returns num unmasked pixels x SIFT_DLEN, in uint8 format operates on greyscale 128 bit images estimate cluster individual (box central) pixels when descriptor matrix flattened, goes along rows first (rows change slow) test dataloader training dataloader imgs currently channel first, [0-1] range, floats 40*40, 128 rows change slowest get the corresponding box central mask/label estimate cluster individual pixels channels last based on segmentation_multioutput_twohead - we pass in the config of the IID run we are comparing against, so the settings can be copied compare against fully unsup set it to be only rgb (and ir if nec) but no sobel - we're clustering single pixel colours IR Data ------------------------------------------------------------------------- unlike in clustering script for STL - isn't any data from unknown classes networks and optimisers ------------------------------------------------------ use mapping assign to assign output_k=gt_k to gt_k and also assess on its predictions, since it's identical to mapping_test_dataloader flat! not used reorder predictions to be same cluster assignments as gt_k each output_k must get mapped write results and centroids to model_ind output file | 1,229 | en | 0.7232 |
from django.db import models
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from camper.sked.models import Event, Session
from camper.twit.threads import SendTweetThread
class TweetTooLongError(Exception):
    """Raised when adding a session would push a tweet past 140 characters."""

    _DEFAULT_MSG = ('Adding this session would result in a tweet longer '
                    'than 140 characters.')

    def __init__(self, msg=None):
        # Fall back to the canonical message when none (or empty) is given.
        self.msg = msg if msg else self._DEFAULT_MSG
class AlreadyAssignedError(Exception):
    """Raised when a session is already present in a tweet sequence."""

    _DEFAULT_MSG = ('This session already belongs to a tweet '
                    'in this sequence.')

    def __init__(self, msg=None):
        # Fall back to the canonical message when none (or empty) is given.
        self.msg = msg if msg else self._DEFAULT_MSG
class Tweet(models.Model):
    """Abstract base for tweets that can be chained into a sequence.

    Concrete subclasses are expected to define a ``previous`` link to the
    prior tweet in the chain (None for the initial tweet).
    """
    # Timestamp of the actual post; None means the tweet has not been sent.
    sent_at = models.DateTimeField(blank=True, null=True)

    class Meta:
        abstract = True

    def send(self):
        """Send this tweet, spinning off a worker thread for the API calls.

        This is weird. It can only be called from the first tweet in
        a series, raising NotImplementedError if called on a non-initial
        tweet. It spins off a thread to make the actual api calls, which
        manages state within the series.
        """
        if self.previous:
            raise NotImplementedError('Serial tweets can only be sent from the beginning.')
        SendTweetThread(self).start()

    @property
    def is_sent(self):
        """True once the tweet has been posted (sent_at is set)."""
        return self.sent_at is not None
class SessionBlockTweetManager(models.Manager):
    """Manager with convenience queries over SessionBlockTweet objects."""

    def unsent(qs):
        """Return initial (previous=None) tweets that have not been sent."""
        # NOTE(review): the parameter is conventionally named `self`; it is the
        # manager instance and is filtered on directly.
        return qs.filter(sent_at=None, previous=None)
class SessionBlockTweet(Tweet):
    """A tweet announcing the sessions in one timeslot of an event.

    When a timeslot has more sessions than fit into 140 characters, the
    overflow is chained onto follow-up tweets via the ``previous``/``next``
    one-to-one link; ``send()`` (on the base class) may only be called on
    the initial tweet of a chain.
    """
    timeslot = models.DateTimeField()
    event = models.ForeignKey(Event, related_name="session_tweets")
    # Comma-separated Session primary keys announced by this tweet.
    session_ids = models.CommaSeparatedIntegerField(max_length=128,
                                                    blank=True, default="")
    # Link to the preceding tweet in the chain; None for the initial tweet.
    previous = models.OneToOneField('SessionBlockTweet', blank=True,
                                    null=True, unique=True, related_name="next")

    objects = SessionBlockTweetManager()

    class Meta:
        ordering = ('-timeslot', 'id')

    def __unicode__(self):
        try:
            return 'Tweet %s of %s for %s at %s' % (
                self.index + 1, self.total, self.timeslot, self.event)
        except Exception:
            # index/total walk the previous/next chain, which can fail for
            # unsaved or partially built sequences; fall back to a simpler form.
            # (Was a bare `except:`, which would also swallow SystemExit.)
            return 'Tweet for %s at %s' % (self.timeslot, self.event)

    def touch(self):
        """Invalidate the cached sequence and sessions querysets."""
        self._seq = None
        self._sessions = None

    def get_sequence(self):
        """Return the full chain of tweets this one belongs to, in order."""
        try:
            if self._seq is not None:
                return self._seq
        except AttributeError:
            pass
        # Walk back to the first tweet, then forward through `next` links.
        seq = []
        cursor = self
        while cursor.previous:
            cursor = cursor.previous
        seq.append(cursor)
        while True:
            try:
                cursor = cursor.next
                seq.append(cursor)
            except SessionBlockTweet.DoesNotExist:
                break
        self._seq = seq
        return seq

    def first_in_sequence(self):
        """Return the initial tweet of this chain."""
        return self.get_sequence()[0]

    def get_session_ids(self):
        """Return the stored session ids as a list of ints ([] when unset)."""
        try:
            return [int(id) for id in self.session_ids.split(',')]
        except (AttributeError, ValueError):
            # session_ids may be None or "" before any session is added.
            # (Was a bare `except:`.)
            return []

    def add_session(self, session):
        """Attach *session* to this tweet.

        Raises AlreadyAssignedError if the session already appears anywhere
        in this tweet's chain, and TweetTooLongError if adding it would push
        the rendered text past 140 characters.
        """
        if self.length < 140:
            assigned = [id for tweet in self.get_sequence() for id in tweet.get_session_ids()]
            if session.id in assigned:
                raise AlreadyAssignedError()
            locally_assigned = self.get_session_ids()
            locally_assigned.append(session.id)
            self.session_ids = ','.join([str(id) for id in locally_assigned])
            self.touch()
            if self.length > 140:
                if self.sessions.count() > 1:
                    # Roll back: the tweet fit before this session was added.
                    self.remove_session(session)
                raise TweetTooLongError()
        else:
            raise TweetTooLongError()

    def remove_session(self, session):
        """Detach *session* from this tweet and invalidate caches."""
        self.session_ids = ','.join([str(id) for
                                     id in self.get_session_ids() if
                                     id != session.id])
        self.touch()

    @property
    def sessions(self):
        """QuerySet of the Session objects this tweet announces (cached)."""
        try:
            if self._sessions is not None:
                return self._sessions
        except AttributeError:
            pass
        try:
            self._sessions = Session.objects.filter(id__in=self.get_session_ids())
        except ValueError:
            self._sessions = Session.objects.none()
        return self._sessions

    @property
    def index(self):
        """Zero-based position of this tweet within its chain."""
        return self.get_sequence().index(self)

    @property
    def is_first(self):
        return self.previous is None

    @property
    def is_last(self):
        try:
            return self.next is None
        except SessionBlockTweet.DoesNotExist:
            return True

    @property
    def total(self):
        """Number of tweets in this chain."""
        return len(self.get_sequence())

    @property
    def text(self):
        """Render the tweet body: optional time prefix plus session list."""
        txt = u''
        if self.is_first:
            txt += u'Coming up at %s: ' % (self.timeslot
                                           .astimezone(timezone.get_current_timezone())
                                           .strftime('%-I:%M'))
        # Only truncate the title when a single session occupies the tweet.
        # (Was `count() is 1`: identity comparison on an int only works by
        # accident of CPython small-int interning - use == instead.)
        txt += u', '.join(['%s (%s)' % (truncatechars(s.title, 120) if
                                        self.sessions.count() == 1 else
                                        s.title, s.location.name) for
                           s in self.sessions])
        return txt

    @property
    def length(self):
        """Character length of the rendered tweet text."""
        return len(self.text)
| camper/twit/models.py | 5,444 | ''' This is weird. It can only be called from the first tweet in a series, raising NotImplementedError if called on a non-initial tweet. It spins off a thread to make the actual api calls, which manages state within the series. ''' | 247 | en | 0.907311 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import re
import logging
from itertools import groupby
from six.moves.cPickle import dumps, loads
from collections import defaultdict
from six.moves.urllib.parse import unquote
import bson
import pymongo
from tg import tmpl_context as c
from ming import collection, Field, Index
from ming import schema as S
from ming.utils import LazyProperty
from ming.orm import session, mapper
from ming.orm import ForeignIdProperty, RelationProperty
from allura.lib import helpers as h
from .session import main_doc_session, main_orm_session
from .project import Project
import six
log = logging.getLogger(__name__)
# Collection definitions
ArtifactReferenceDoc = collection(
str('artifact_reference'), main_doc_session,
Field('_id', str),
Field('artifact_reference', dict(
cls=S.Binary(),
project_id=S.ObjectId(),
app_config_id=S.ObjectId(),
artifact_id=S.Anything(if_missing=None))),
Field('references', [str], index=True),
Index('artifact_reference.project_id'), # used in ReindexCommand
)
ShortlinkDoc = collection(
str('shortlink'), main_doc_session,
Field('_id', S.ObjectId()),
# index needed for from_artifact() and index_tasks.py:del_artifacts
Field('ref_id', str, index=True),
Field('project_id', S.ObjectId()),
Field('app_config_id', S.ObjectId()),
Field('link', str),
Field('url', str),
# used by from_links() More helpful to have project_id first, for other
# queries
Index('project_id', 'link'),
)
# Class definitions
class ArtifactReference(object):
    """ORM class (mapped over ArtifactReferenceDoc) keyed by an artifact's
    index id; ``artifact_reference`` stores the pickled artifact class plus
    project/app-config/artifact ids, enough to re-load the artifact later.
    """

    @classmethod
    def from_artifact(cls, artifact):
        '''Upsert logic to generate an ArtifactReference object from an artifact'''
        obj = cls.query.get(_id=artifact.index_id())
        if obj is not None:
            return obj
        try:
            obj = cls(
                _id=artifact.index_id(),
                artifact_reference=dict(
                    cls=bson.Binary(dumps(artifact.__class__, protocol=2)),
                    project_id=artifact.app_config.project_id,
                    app_config_id=artifact.app_config._id,
                    artifact_id=artifact._id))
            session(obj).flush(obj)
            return obj
        except pymongo.errors.DuplicateKeyError:  # pragma no cover
            # A concurrent insert won the race; discard ours and fetch theirs.
            session(obj).expunge(obj)
            return cls.query.get(_id=artifact.index_id())

    @LazyProperty
    def artifact(self):
        '''Look up the artifact referenced'''
        aref = self.artifact_reference
        try:
            cls = loads(six.binary_type(aref.cls))
            with h.push_context(aref.project_id):
                return cls.query.get(_id=aref.artifact_id)
        except Exception:
            # Returns None implicitly on failure; callers must handle that.
            log.exception('Error loading artifact for %s: %r',
                          self._id, aref)
class Shortlink(object):
    '''Collection mapping shorthand_ids for artifacts to ArtifactReferences'''
    # Regexes used to find shortlinks
    _core_re = r'''(\[
        (?:(?P<project_id>.*?):)? # optional project ID
        (?:(?P<app_id>.*?):)? # optional tool ID
        (?P<artifact_id>.*) # artifact ID
    \])'''
    # shortlink preceded by whitespace / shortlink at the start of the text
    re_link_1 = re.compile(r'\s' + _core_re, re.VERBOSE)
    re_link_2 = re.compile(r'^' + _core_re, re.VERBOSE)

    def __repr__(self):
        return '<Shortlink %s %s %s -> %s>' % (
            self.project_id,
            self.app_config_id,
            self.link,
            self.ref_id)

    @classmethod
    def lookup(cls, link):
        """Resolve a single shortlink string to its Shortlink (or None)."""
        return cls.from_links(link)[link]

    @classmethod
    def from_artifact(cls, a):
        """Upsert the Shortlink for artifact *a*, refreshing link and url.

        Returns None (and deletes the record) when the artifact has no
        shorthand_id.
        """
        result = cls.query.get(ref_id=a.index_id())
        if result is None:
            try:
                result = cls(
                    ref_id=a.index_id(),
                    project_id=a.app_config.project_id,
                    app_config_id=a.app_config._id)
                session(result).flush(result)
            except pymongo.errors.DuplicateKeyError:  # pragma no cover
                # A concurrent insert won the race; use the winner's record.
                session(result).expunge(result)
                result = cls.query.get(ref_id=a.index_id())
        result.link = a.shorthand_id()
        result.url = a.url()
        if result.link is None:
            result.delete()
            return None
        return result

    @classmethod
    def from_links(cls, *links):
        '''Convert a sequence of shortlinks to the matching Shortlink objects'''
        if len(links):
            result = {}
            # Parse all the links
            parsed_links = dict((link, cls._parse_link(link))
                                for link in links)
            links_by_artifact = defaultdict(list)
            project_ids = set()
            for link, d in list(parsed_links.items()):
                if d:
                    project_ids.add(d['project_id'])
                    links_by_artifact[unquote(d['artifact'])].append(d)
                else:
                    # Unparseable link: map it to None in the result.
                    result[link] = parsed_links.pop(link)
            q = cls.query.find(
                dict(
                    link={'$in': list(links_by_artifact.keys())},
                    project_id={'$in': list(project_ids)}
                ),
                validate=False,
                sort=[('_id', pymongo.DESCENDING)],  # if happen to be multiple (ticket move?) have newest first
            )
            matches_by_artifact = dict(
                (link, list(matches))
                for link, matches in groupby(q, key=lambda s: unquote(s.link)))
            for link, d in six.iteritems(parsed_links):
                matches = matches_by_artifact.get(unquote(d['artifact']), [])
                # Keep only matches from the expected project (and app, when
                # the link specified one) whose tool is still installed.
                matches = (
                    m for m in matches
                    if m.project.shortname == d['project'] and
                    m.project.neighborhood_id == d['nbhd'] and
                    m.app_config is not None and
                    m.project.app_instance(m.app_config.options.mount_point))
                if d['app']:
                    matches = (
                        m for m in matches
                        if m.app_config.options.mount_point == d['app'])
                result[link] = cls._get_correct_match(link, list(matches))
            return result
        else:
            return {}

    @classmethod
    def _get_correct_match(cls, link, matches):
        """Pick one Shortlink from *matches*, preferring the current app's."""
        result = None
        if len(matches) == 1:
            result = matches[0]
        elif len(matches) > 1 and getattr(c, 'app', None):
            # use current app's link
            for m in matches:
                if m.app_config_id == c.app.config._id:
                    result = m
                    break
            if not result:
                cls.log_ambiguous_link('Can not remove ambiguity for link %s with c.app %s', matches, link, c.app)
                result = matches[0]
        elif len(matches) > 1 and not getattr(c, 'app', None):
            cls.log_ambiguous_link('Ambiguous link to %s and c.app is not present to remove ambiguity', matches, link)
            result = matches[0]
        return result

    @classmethod
    def log_ambiguous_link(cls, msg, matches, *args):
        """Warn about an ambiguous shortlink, listing every candidate."""
        log.warn(msg, *args)
        for m in matches:
            log.warn('... %r', m)

    @classmethod
    def _parse_link(cls, s):
        '''Parse a shortlink into its nbhd/project/app/artifact parts'''
        s = s.strip()
        if s.startswith('['):
            s = s[1:]
        if s.endswith(']'):
            s = s[:-1]
        parts = s.split(':')
        p_shortname = None
        p_id = None
        p_nbhd = None
        if getattr(c, 'project', None):
            # Default to the current request's project context.
            p_shortname = getattr(c.project, 'shortname', None)
            p_id = getattr(c.project, '_id', None)
            p_nbhd = c.project.neighborhood_id
        if len(parts) == 3:
            # Fully qualified: project:app:artifact
            p = Project.query.get(shortname=parts[0], neighborhood_id=p_nbhd)
            if p:
                p_id = p._id
            return dict(
                nbhd=p_nbhd,
                project=parts[0],
                project_id=p_id,
                app=parts[1],
                artifact=parts[2])
        elif len(parts) == 2:
            # app:artifact within the current project
            return dict(
                nbhd=p_nbhd,
                project=p_shortname,
                project_id=p_id,
                app=parts[0],
                artifact=parts[1])
        elif len(parts) == 1:
            # bare artifact within the current project
            return dict(
                nbhd=p_nbhd,
                project=p_shortname,
                project_id=p_id,
                app=None,
                artifact=parts[0])
        else:
            return None
# Mapper definitions
mapper(ArtifactReference, ArtifactReferenceDoc, main_orm_session)
mapper(Shortlink, ShortlinkDoc, main_orm_session, properties=dict(
ref_id=ForeignIdProperty(ArtifactReference),
project_id=ForeignIdProperty('Project'),
app_config_id=ForeignIdProperty('AppConfig'),
project=RelationProperty('Project'),
app_config=RelationProperty('AppConfig'),
ref=RelationProperty(ArtifactReference)))
| Allura/allura/model/index.py | 9,962 | Collection mapping shorthand_ids for artifacts to ArtifactReferences
Parse a shortlink into its nbhd/project/app/artifact parts
Look up the artifact referenced
Upsert logic to generate an ArtifactReference object from an artifact
Convert a sequence of shortlinks to the matching Shortlink objects
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Collection definitions used in ReindexCommand index needed for from_artifact() and index_tasks.py:del_artifacts used by from_links() More helpful to have project_id first, for other queries Class definitions pragma no cover Regexes used to find shortlinks pragma no cover Parse all the links if happen to be multiple (ticket move?) have newest first use current app's link Mapper definitions | 1,528 | en | 0.857202 |
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.geometry import coordinates
from nodebox.graphics.shader import dropshadow, OffscreenBuffer, transparent, stretch
from time import time
flower = Image("cell.png")
shadow = dropshadow(flower, alpha=1.0) # = image(blur(flower), color=(0,0,0,1))
# Each "flower" is drawn with a shadow underneath to add some depth.
# The global shadow layer is at the bottom of the plant.
# Ideally, each growing root would have its own shadow,
# but it is faster this way using only one offscreen buffer for all global shadows
# and a pre-rendered shadow image for each individual flower.
class Root:
    """One growing tendril of the plant: the position, heading and lifespan
    of its current tip segment only (older segments live in the offscreen
    texture - see update()).
    """

    def __init__(self, x, y, angle=90, radius=20, step=60, time=1.0, color=Color(0)):
        # NOTE(review): the default Color(0) instance is shared between all
        # roots created without an explicit color; it is never mutated here,
        # and copy() clones the color for callers needing independence.
        self.x = x
        self.y = y
        self.angle = angle
        self.radius = radius # Segment length.
        self.step = step # Maximum left or right rotation from current angle.
        self.time = time # Remaining "life"; decays each update().
        self.color = color

    def copy(self):
        """Return an independent clone of this root (with its own Color)."""
        return Root(
            self.x,
            self.y,
            self.angle,
            self.radius,
            self.step,
            self.time,
            self.color.copy())

    def update(self):
        """Advance the tip one segment and decay its lifespan.

        The performance trick is that we don't keep a history,
        e.g. no list with all the previous segments in the growing root.
        We simply keep the position and heading of the last segment.
        The previous segments have been rendered in a texture, i.e. they are "frozen".
        """
        self.x, self.y = coordinates(self.x, self.y, self.radius, self.angle)
        self.angle += random(-self.step, self.step)
        self.time *= 0.8 + random(0.2)

    def draw(self):
        """Draw the current segment, its node circle and the flower sprite."""
        push()
        translate(self.x, self.y)
        strokewidth(2)
        stroke(
            self.color.r,
            self.color.g,
            self.color.b,
            self.color.a * self.time) # More transparent over time.
        ellipse(0, 0,
            width = 0.2+ 0.5 * self.time * self.radius,
            height = 0.2+ 0.5 * self.time * self.radius) # Smaller over time.
        rotate(self.angle)
        line(0, 0, self.radius, 0)
        scale(0.2 + self.time)
        image(shadow, -15, -15, width=20, height=20, alpha=0.5)
        image(flower, -10, -10, width=20, height=20, alpha=0.5,
            color=(canvas.mouse.relative_x*0.5+0.5, 1, self.time+0.5, 1))
        pop()
CLR = Color(0.27,0.29,0.36)
CLR = lighter(CLR, 0.3)
plant = [Root(200, -50, color=CLR) for i in range(10)]
def grow(plant=[], branch=0.01):
    """ Updates each root in the given list to a new position.
        Roots can branch and will disappear over time.
        Returns the updated list (the input list is not modified).
    """
    # NOTE(review): the mutable default [] is safe here because the list is
    # only read, never mutated in place.
    new = []
    for root in plant:
        root.update()
        if root.time > 0.05:
            new.append(root)
        elif len(plant) < 50:
            # Replace the disappeared root with a new one.
            # Vary the time (=lifespan) so new roots appear at irregular intervals.
            x, y, angle = choice((
                (200 + random(50), -50, 90+random(-10,10)),
                #(-50, random(50), 0)
            ))
            new.append(Root(x, y, angle=angle, color=CLR, time=random(0.5, 3.5, bias=0.3)))
        if random() < branch:
            # Occasionally fork: keep a duplicate of the current root too.
            new.append(root.copy())
    return new
# Roots are drawn into an offscreen buffer instead of directly to the screen.
# This way we get an image with a transparent background, which we can use
# to generate a dropshadow on-the-fly.
# The bigger the size of the buffer, the more pixels and the slower it gets.
# We work at a lower resolution and then scale the buffer up to the size of the screen.
RESOLUTION = 0.5
buffer = OffscreenBuffer(
RESOLUTION * canvas.screen.width,
RESOLUTION * canvas.screen.height)
def draw(canvas):
    """Per-frame callback: grow the plant into the offscreen buffer and
    composite it onto the screen with shadow and stretch effects."""
    # It takes some juggling with the contrast of the colors to avoid artefacts.
    colorplane(0, 0, canvas.width, canvas.height,
        lighter(color(0.14, 0.13, 0.18)),
        color(0.07, 0.06, 0.14),
        color(0.14, 0.20, 0.18),
        color(0.07, 0.06, 0.14))

    global plant
    plant = grow(plant)

    # Draw each root in the offscreen texture.
    # The texture already contains whatever was drawn in it previous frame.
    buffer.push()
    for root in plant:
        root.draw()
        # The mouse position steers turn rate and segment length.
        root.step = canvas.mouse.relative_x * 60
        root.radius = canvas.mouse.relative_y * 30
    buffer.pop()

    # Every few frames, make the buffered image more transparent,
    # so that old content fades away.
    if canvas.frame % 2 == 0 and not canvas.mouse.pressed:
        buffer.texture = transparent(buffer.texture, 0.9).texture

    # Scale up the buffered image to the screen size.
    # Draw the image with a dropshadow effect.
    # Since the offscreen buffer is scaled, the edges will look rough.
    # Apply a small blur effect to smoothen them.
    img = buffer.texture
    #img = mirror(img, vertical=True, dx=0.35, dy=0) # Interesting patterns.
    image(dropshadow(img, alpha=1.0, amount=1), 0, -50,
        width = canvas.width,
        height = canvas.height+50)

    # Hypnotizing breathing effect:
    img = stretch(img, 0.2, 0.1, radius=0.75, zoom=0.4-cos(canvas.frame*0.01)*0.4)
    image(img, 0, 0,
        width = canvas.width,
        height = canvas.height,
        )#filter = blurred(scale=0.75))
canvas.fps = 20
canvas.size = 800, 600
canvas.fullscreen = True
canvas.run(draw) | examples/07-filter/09-buffer.py | 5,633 | Updates each root in the given list to a new position.
Roots can branch and will disappear over time.
Returns the updated list.
= image(blur(flower), color=(0,0,0,1)) Each "flower" is drawn with a shadow underneath to add some depth. The global shadow layer is at the bottom of the plant. Ideally, each growing root would have its own shadow, but it is faster this way using only one offscreen buffer for all global shadows and a pre-rendered shadow image for each individual flower. Segment length. Maximum left or right rotation from current angle. The performance trick is that we don't keep a history, e.g. no list with all the previous segments in the growing root. We simply keep the position and heading of the last segment. The previous segments have been rendered in a texture, i.e. they are "frozen". More transparent over time. Smaller over time. Replace the disappeared root with a new one. Vary the time (=lifespan) so new roots appear at irregular intervals.(-50, random(50), 0) Roots are drawn into an offscreen buffer instead of directly to the screen. This way we get an image with a transparent background, which we can use to generate a dropshadow on-the-fly. The bigger the size of the buffer, the more pixels and the slower it gets. We work at a lower resolution and then scale the buffer up to the size of the screen. It takes some juggling with the contrast of the colors to avoid artefacts. Draw each root in the offscreen texture. The texture already contains whatever was drawn in it previous frame. Every few frames, make the buffered image more transparent, so that old content fades away. Scale up the buffered image to the screen size. Draw the image with a dropshadow effect. Since the offscreen buffer is scaled, the edges will look rough. Apply a small blur effect to smoothen them. img = mirror(img, vertical=True, dx=0.35, dy=0) Interesting patterns. Hypnotizing breathing effect:filter = blurred(scale=0.75)) | 1,952 | en | 0.891721 |
import tkinter as tk
from PIL import Image, ImageTk
# The Custom Variable Widgets
class MyBar(tk.Canvas) :
    """A tkinter Canvas progress bar whose visible area is cut to a shape.

    The shape image's luminance is used as an alpha mask over the trough;
    the bar rectangle fills from the bottom in proportion to
    ``value``/``maximum``.
    """

    def __init__(self, master:object, shape:object, value=0, maximum=100,
                 bg="#231303", trough_color='#8a7852', bar_color='#f7f4bf'):
        """Creating the alpha mask and creating a custom widget of the given shape and dimensions."""
        # open shape mask with PIL
        im_shape_alpha = Image.open(shape).convert('L')
        # create bar shape image with the choosen backgroound color
        im_shape = Image.new('RGBA', im_shape_alpha.size, bg)
        # apply shape as alpha mask to "cut out" the bar shape
        im_shape.putalpha(im_shape_alpha)
        width, height = im_shape_alpha.size
        # create the canvas
        tk.Canvas.__init__(self, master, bg=trough_color, width=width, height=height, highlightthickness=0)
        self._value = value # bar value
        self.maximum = maximum # maximum value
        # bar width and height
        self.height = height
        self.width = width
        # create tkinter image for the shape from the PIL Image
        # (kept as an attribute so the PhotoImage is not garbage-collected)
        self.img_trough = ImageTk.PhotoImage(im_shape, master=self)
        # create bar to display the value (fills upward from the bottom edge)
        self.create_rectangle(0, height, width, height * (1 - value/self.maximum), width=0, fill=bar_color, tags='pbar')
        # display shape on top
        self.create_image(0, 0, anchor='nw', image=self.img_trough)

    @property
    def value(self):
        """Return bar's value."""
        return self._value

    @value.setter
    def value(self, value:int):
        """Set bar's value."""
        self._value = value
        # adjust bar height to value
        self.coords('pbar', 0, self.height, self.width, self.height*(1 - value/self.maximum))
Return bar's value.
Set bar's value.
The Custom Variable Widgets open shape mask with PIL create bar shape image with the choosen backgroound color apply shape as alpha mask to "cut out" the bar shape create the canvas bar value maximum value bar width and height create tkinter image for the shape from the PIL Image create bar to display the value display shape on top adjust bar height to value | 487 | en | 0.682302 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
'''btDown.py - Download resource for HTTP/HTTPS/FTP/Thunder/Magnet/BT
Usage: python3 btDown.py <url> [path]
Required:
url HTTP/HTTPS/FTP/Thunder/MagNet/BT downloading URL
Optionals:
path The store path for the downloaded file
Notice: Python3 required for btDown.py
Author: zanran (wayne@zanran.me)
CreatedAt: Mon Oct 8 21:27:28 CST 2018
'''
import os
import sys
import requests
import time
import re
import enum
import base64
from urllib import parse, request
def usage(err=None):
    """Print an optional error message plus this script's doc text, then exit."""
    output = [err] if err else []
    output.append(__doc__)
    for text in output:
        print(text)
    sys.exit(0)
@enum.unique
class PROTROL_TYEP(enum.Enum):
    """Download protocol kinds (name spelling kept for compatibility)."""
    UNKNOWN = 0
    HTTP = 1     # HTTP/HTTPS download
    FTP = 2      # FTP download
    BT = 3       # BitTorrent (.torrent) download
    MAGNET = 4   # magnet-link download
    THUNDER = 5  # Thunder (Xunlei) download


class BtDown(object):
    """Resolve a download URL (HTTP/HTTPS/FTP/Thunder/Magnet/BT) and fetch it."""

    def __init__(self, url, path = None):
        """
        url  -- the raw URL as given on the command line
        path -- optional destination directory for the downloaded file
        """
        self.origin_url = url
        self.dest_path = path

    def detectProtrolType(self, url):
        """Classify *url* and return the matching PROTROL_TYEP member."""
        bt_type = PROTROL_TYEP.UNKNOWN
        if (re.match('^ftp://', url, re.IGNORECASE)):
            bt_type = PROTROL_TYEP.FTP
        elif (re.match('^thunder://', url, re.IGNORECASE)):
            bt_type = PROTROL_TYEP.THUNDER
        elif (re.match('^magnet:?', url, re.IGNORECASE)):
            bt_type = PROTROL_TYEP.MAGNET
        elif (re.search(r'\.torrent$', url, re.IGNORECASE)):
            bt_type = PROTROL_TYEP.BT
        # http/https detection must come after the .torrent check, so that
        # an HTTP URL pointing at a torrent file is treated as BT.
        elif (re.match('^https?://', url, re.IGNORECASE)):
            bt_type = PROTROL_TYEP.HTTP
        return bt_type

    def _parserThunderUrl(self, url):
        """Decode a thunder:// URL to the plain URL it wraps.

        Thunder URLs are base64('AA' + real_url + 'ZZ') behind the scheme.
        """
        # BUG FIX: re.IGNORECASE was previously passed as the positional
        # *count* argument of re.sub(); it must be passed via flags=, or
        # mixed-case schemes like 'Thunder://' are not stripped and the
        # subsequent b64decode() raises.
        thunder_url = re.sub('^thunder://', '', url, flags=re.IGNORECASE)
        normalize_url = base64.b64decode(thunder_url).decode()
        normalize_url = re.sub('^AA', '', normalize_url)
        normalize_url = re.sub('ZZ$', '', normalize_url)
        return normalize_url

    def _parserMagnetUrl(self, url):
        """Magnet links are not implemented yet; return an empty URL."""
        return ''

    def parseUrlProtrol(self, url):
        """Normalize *url* to a directly downloadable HTTP/FTP URL."""
        normalize_url = url
        bt_type = self.detectProtrolType(url)
        if bt_type is PROTROL_TYEP.THUNDER:
            normalize_url = self._parserThunderUrl(url)
        elif bt_type is PROTROL_TYEP.MAGNET:
            normalize_url = self._parserMagnetUrl(url)
        elif bt_type is PROTROL_TYEP.BT:
            raise Exception('BT (torrent) is unsupported by now !')
        return normalize_url

    def getTitle(self, url):
        """Derive the destination file name from the last URL path segment."""
        title = 'unnamed_file'
        bt_type = self.detectProtrolType(url)
        if bt_type in (PROTROL_TYEP.HTTP, PROTROL_TYEP.FTP):
            last_slash = url.rfind('/')
            if last_slash != -1:
                title = url[last_slash + 1:].strip()
            # BUG FIX: was "title.count('%') > 1", which skipped decoding
            # when the name contained exactly one %xx escape.
            if '%' in title:
                title = parse.unquote(title)
        return title

    def _showDownloadProgress(self, file, percent):
        """Render an in-place progress line for *file*, capped at 100%."""
        base_file = os.path.basename(file)
        if(percent > 100):
            percent = 100
        message = '\r Downloading %s ...... %2.f%%' % (base_file, percent)
        print(message, end='')
        return

    def _download_http(self, url, dest_file):
        """Stream *url* over HTTP(S) into *dest_file*, showing progress."""
        res = requests.get(url, stream=True)
        max_file_bytes = int(res.headers['Content-Length'])
        chunk_size = 1024*1024*4  # 4 MiB per chunk
        downloaded_size = 0
        # FIX: with-block guarantees the file is closed even if a chunk
        # read fails mid-transfer (previous code leaked the handle).
        with open(dest_file, 'wb') as f:
            for data in res.iter_content(chunk_size):
                downloaded_size += len(data)
                percent = downloaded_size / max_file_bytes * 100
                self._showDownloadProgress(dest_file, percent)
                f.write(data)

    def _download_ftp(self, url, dest_file):
        """Download *url* over FTP into *dest_file* via urllib."""
        def _report(blocknum, blocksize, totalsize):
            # urlretrieve progress hook; totalsize may be 0/unknown.
            if not totalsize:
                return
            percent = 100.0 * blocknum * blocksize / totalsize
            self._showDownloadProgress(dest_file, percent)
        url = parse.quote(url, safe=':/@')
        request.urlretrieve(url, dest_file, _report)

    def download(self):
        """Resolve the original URL, then download it to the target path."""
        print('Start downloading %s' % self.origin_url)
        normalize_url = self.parseUrlProtrol(self.origin_url)
        print('Parse real url %s' % normalize_url)
        title = self.getTitle(normalize_url)
        dest_file = title
        if self.dest_path:
            if not os.path.exists(self.dest_path):
                os.makedirs(self.dest_path)
            dest_file = os.path.join(self.dest_path, title)
        # Start from scratch if a stale copy already exists.
        if os.path.exists(dest_file):
            os.remove(dest_file)
        bt_type = self.detectProtrolType(normalize_url)
        if bt_type is PROTROL_TYEP.HTTP:
            self._download_http(normalize_url, dest_file)
        elif bt_type is PROTROL_TYEP.FTP:
            self._download_ftp(normalize_url, dest_file)
        else:
            raise Exception('Unknown protrol type detected !')
        print('\nSaved file: %s' % dest_file)
        return
def main():
    """Command-line entry point: validate argv, then run the download."""
    argv = sys.argv
    if len(argv) not in (2, 3):
        usage()
    url = argv[1]
    path = argv[2] if len(argv) > 2 else None
    BtDown(url, path).download()
    print('------------------ Well done ------------------')


if __name__ == '__main__':
    main()
| btDown.py | 5,205 | btDown.py - Download resource for HTTP/HTTPS/FTP/Thunder/Magnet/BT
Usage: python3 btDown.py <url> [path]
Required:
url HTTP/HTTPS/FTP/Thunder/MagNet/BT downloading URL
Optionals:
path The store path for the downloaded file
Notice: Python3 required for btDown.py
Author: zanran (wayne@zanran.me)
CreatedAt: Mon Oct 8 21:27:28 CST 2018
!/usr/bin/env python3-*- coding: utf-8 -*- HTTP/HTTPS下载 FTP下载 BT下载 磁力下载 讯雷下载 http/https detect must be after torrent | 480 | en | 0.691049 |
"""Commands for starting daemons."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pprint
import confpy.api
import confpy.core.option
from .. import messages
# Declarative configuration schema shared by all pyperf CLI commands.
# Concrete values are supplied at runtime from the --config Python file
# and/or PYPERF_* environment variables (see parse_options calls below).
cfg = confpy.api.Configuration(
    transport=confpy.api.Namespace(
        description='Message transport options.',
        # Transport from which new profile requests are fetched.
        source=confpy.core.option.Option(
            description='The transport to fetch new requests from.',
            required=True,
        ),
        # Transport to which failures are published.
        error=confpy.core.option.Option(
            description='The transport to which errors are written.',
            required=True,
        ),
        # Transport to which successful profile results are published.
        result=confpy.core.option.Option(
            description='The transport to which results are written.',
            required=True,
        ),
    ),
    daemon=confpy.api.Namespace(
        description='Long running daemon options.',
        profiler=confpy.core.option.Option(
            description='The profiler implementation to use.',
            required=True,
        ),
        process=confpy.core.option.Option(
            description='The daemon interface implemention to use.',
            required=True,
        ),
        pidfile=confpy.api.StringOption(
            description='The location to use as a pidfile.',
            required=True,
        ),
    ),
)
def _common_args():
"""ArgumentParser setup for all CLI commands."""
parser = argparse.ArgumentParser(
description='Start a new profiler process.'
)
parser.add_argument(
'--config',
required=True,
help='The Python configuration file for the process.',
)
return parser
def profiler_main():
    """Start, stop, or restart a profiler daemon based on --action."""
    parser = _common_args()
    parser.add_argument(
        '--action',
        required=True,
        choices=('start', 'stop', 'restart'),
    )
    args, _ = parser.parse_known_args()
    # Local cfg deliberately shadows the module-level schema: this is the
    # schema populated with values from the config file / environment.
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    proc = cfg.daemon.process(
        source_transport=cfg.transport.source,
        error_transport=cfg.transport.error,
        results_transport=cfg.transport.result,
        profiler=cfg.daemon.profiler,
        pidfile=cfg.daemon.pidfile,
    )
    # argparse's choices= guarantees exactly one of these keys matches.
    dispatch = {'stop': proc.stop, 'start': proc.start, 'restart': proc.restart}
    dispatch[args.action]()
def send_request():
    """Publish a ProfileRequest message onto the source transport."""
    parser = _common_args()
    parser.add_argument(
        '--identifier',
        required=True,
        help='The unique message identifier.',
    )
    parser.add_argument(
        '--setup',
        default='pass',
        help='Any setup code if needed for the profile.',
    )
    parser.add_argument(
        '--code',
        required=True,
        help='The code to profile.',
    )
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    # Build the message first, then hand it to a fresh source transport.
    profile_request = messages.ProfileRequest(
        identifier=args.identifier,
        setup=args.setup,
        code=args.code,
    )
    cfg.transport.source().send(profile_request)
def fetch_result():
    """Pop one result message, acknowledge it, and pretty-print its JSON."""
    parser = _common_args()
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    transport = cfg.transport.result()
    msg = transport.fetch()
    if msg is None:
        # Nothing queued; print nothing (same as the original behavior).
        return
    transport.complete(msg)
    pprint.pprint(msg.json)
def fetch_error():
    """Pop one error message, acknowledge it, and pretty-print its JSON."""
    parser = _common_args()
    args, _ = parser.parse_known_args()
    cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
    transport = cfg.transport.error()
    msg = transport.fetch()
    if msg is None:
        # Nothing queued; print nothing (same as the original behavior).
        return
    transport.complete(msg)
    pprint.pprint(msg.json)
| pyperf/cmd/daemons.py | 3,964 | ArgumentParser setup for all CLI commands.
Fetch an error from the transport.
Fetch a result from the transport.
Manage a profiler daemon.
Send a profile request to the daemon.
Commands for starting daemons. | 207 | en | 0.652331 |
from kat.harness import Query, EDGE_STACK
from abstract_tests import AmbassadorTest, HTTP
from abstract_tests import ServiceType
from selfsigned import TLSCerts
from kat.utils import namespace_manifest
#####
# XXX This file is annoying.
#
# RedirectTestsWithProxyProto and RedirectTestsInvalidSecret used to be subclasses of RedirectTests,
# which makes a certain amount of sense. Problem is that when I wanted to modify just RedirectTests
# to have secrets defined, that ended up affecting the two subclasses in bad ways. There's basically
# no way to subclass an AmbassadorTest without having your base class be run separately, which isn't
# what I wanted here. Sigh.
class RedirectTests(AmbassadorTest):
    """Checks that cleartext requests are 301-redirected to HTTPS when the
    TLS module sets redirect_cleartext_from on port 8080.
    """
    target: ServiceType
    # This test manages TLS itself, so no cleartext Host is wanted.
    edge_stack_cleartext_host = False
    def init(self):
        if EDGE_STACK:
            self.xfail = "Not yet supported in Edge Stack"
        # NOTE(review): this unconditionally overwrites the Edge Stack xfail
        # above -- presumably deliberate while the FIXME stands; confirm.
        self.xfail = "FIXME: IHA"
        self.target = HTTP()
    def requirements(self):
        # only check https urls since test readiness will only end up barfing on redirect
        yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
    def manifests(self):
        # The redirect-cert Secret is created twice: once in its own
        # namespace and once in the default namespace.
        return namespace_manifest("redirect-namespace") + f"""
---
apiVersion: v1
kind: Secret
metadata:
  name: redirect-cert
  namespace: redirect-namespace
type: kubernetes.io/tls
data:
  tls.crt: {TLSCerts["localhost"].k8s_crt}
  tls.key: {TLSCerts["localhost"].k8s_key}
---
apiVersion: v1
kind: Secret
metadata:
  name: redirect-cert
type: kubernetes.io/tls
data:
  tls.crt: {TLSCerts["localhost"].k8s_crt}
  tls.key: {TLSCerts["localhost"].k8s_key}
""" + super().manifests()
    def config(self):
        # Use self here, not self.target, because we want the TLS module to
        # be annotated on the Ambassador itself.
        yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
  server:
    enabled: True
    secret: redirect-cert
    redirect_cleartext_from: 8080
""")
        yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
    def queries(self):
        # [0] A cleartext request must be answered with a 301 redirect.
        yield Query(self.url("tls-target/", scheme="http"), expected=301)
        # [1] -- PHASE 2: the diagnostics overview must report no errors.
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors",
                             scheme="https"),
                    insecure=True,
                    phase=2)
    def check(self):
        # For query 0, check the redirection target.
        assert len(self.results[0].headers['Location']) > 0
        assert self.results[0].headers['Location'][0].find('/tls-target/') > 0
        # For query 1, we require no errors.
        # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
        errors = self.results[1].json
        assert(len(errors) == 0)
class RedirectTestsWithProxyProto(AmbassadorTest):
    """Checks redirect behavior when PROXY protocol is required: a plain
    request without a PROXY header should be dropped, not 301-redirected.
    """
    target: ServiceType
    def init(self):
        self.xfail = "FIXME: IHA"
        self.target = HTTP()
    def requirements(self):
        # only check https urls since test readiness will only end up barfing on redirect
        yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
    def config(self):
        yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: ambassador
config:
  use_proxy_proto: true
  enable_ipv6: true
""")
        yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
    def queries(self):
        # TODO (concaf): FWIW, this query only covers one side of the story. This tests that this is the correct
        # deviation from the normal behavior (301 response), but does not test a 301 when proxy proto is actually sent.
        # This is because net/http does not yet support adding proxy proto to HTTP requests, and hence it's difficult
        # to test with kat. We will need to open a raw TCP connection (e.g. telnet/nc) and send the entire HTTP Request
        # in plaintext to test this behavior (or use curl with --haproxy-protocol).
        yield Query(self.url("tls-target/"), error=[ "EOF", "connection reset by peer" ])
        # We can't do the error check until we have the PROXY client mentioned above.
        # # [1] -- PHASE 2
        # yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2)
        #
        # def check(self):
        #     # We don't have to check anything about query 0, the "expected" clause is enough.
        #
        #     # For query 1, we require no errors.
        #     # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
        #     errors = self.results[1].json
        #     assert(len(errors) == 0)
class RedirectTestsInvalidSecret(AmbassadorTest):
    """
    This test tests that even if the specified secret is invalid, the rest of TLS Context should
    go through. In this case, even though the secret does not exist, redirect_cleartext_from
    should still take effect.
    """
    target: ServiceType
    def init(self):
        if EDGE_STACK:
            self.xfail = "Not yet supported in Edge Stack"
        # NOTE(review): unconditionally overwrites the Edge Stack xfail above;
        # presumably deliberate while the FIXME stands -- confirm.
        self.xfail = "FIXME: IHA"
        self.target = HTTP()
    def requirements(self):
        # only check https urls since test readiness will only end up barfing on redirect
        yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
    def config(self):
        yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
  server:
    enabled: True
    secret: does-not-exist-secret
    redirect_cleartext_from: 8080
""")
        yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
    def queries(self):
        # [0] Redirection must still happen even though the secret is missing.
        yield Query(self.url("tls-target/"), expected=301)
        # There's kind of no way to do this. Looks like we need to speak HTTP to the port on which we
        # think the server is listening for HTTPS? This is a bad config all the way around, really.
        # # [1] -- PHASE 2
        # yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors", scheme="https"), phase=2)
        #
        # def check(self):
        #     # We don't have to check anything about query 0, the "expected" clause is enough.
        #
        #     # For query 1, we require no errors.
        #     # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
        #     errors = self.results[1].json
        #     assert(len(errors) == 0)
class XFPRedirect(AmbassadorTest):
    """Checks that the XFP securityModel routes purely on X-Forwarded-Proto:
    http -> 301 redirect, https -> routed normally.
    """
    parent: AmbassadorTest
    target: ServiceType
    edge_stack_cleartext_host = False
    def init(self):
        if EDGE_STACK:
            self.xfail = "Not yet supported in Edge Stack"
        self.target = HTTP()
        # This test supplies its own Listener below, so suppress the defaults.
        self.add_default_http_listener = False
        self.add_default_https_listener = False
    def manifests(self):
        return self.format('''
---
apiVersion: getambassador.io/v3alpha1
kind: Listener
metadata:
  name: ambassador-listener-8080
spec:
  ambassador_id: [{self.ambassador_id}]
  port: 8080
  protocol: HTTP
  securityModel: XFP
  l7Depth: 1
  hostBinding:
    namespace:
      from: ALL
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
  name: weird-xfp-test-host
spec:
  ambassador_id: [{self.ambassador_id}]
  requestPolicy:
    insecure:
      action: Redirect
''') + super().manifests()
    def config(self):
        yield self.target, self.format("""
kind: Module
name: ambassador
config:
  use_remote_address: false
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: {self.name}
hostname: "*"
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")
    def queries(self):
        # [0] XFP=http must be redirected.
        yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "http" }, expected=301)
        # [1] XFP=https must be routed straight through.
        yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "https" }, expected=200)
        # [2] -- PHASE 2: the diagnostics overview must report no errors.
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), headers={ "X-Forwarded-Proto": "https" }, phase=2)
    def check(self):
        # For query 0, check the redirection target.
        expected_location = ["https://" + self.path.fqdn + "/" + self.name + "/target/"]
        actual_location = self.results[0].headers['Location']
        assert actual_location == expected_location, "Expected redirect location to be {}, got {} instead".format(
            expected_location,
            actual_location
        )
        # For query 1, we don't have to check anything, the "expected" clause is enough.
        # For query 2, we require no errors.
        # XXX Ew. If self.results[2].json is empty, the harness won't convert it to a response.
        errors = self.results[2].json
        assert(len(errors) == 0)
    def requirements(self):
        # We're replacing super()'s requirements deliberately here: we need the XFP header or they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"X-Forwarded-Proto": "https"}))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"X-Forwarded-Proto": "https"}))
| python/tests/kat/t_redirect.py | 9,608 | This test tests that even if the specified secret is invalid, the rest of TLS Context should
go through. In this case, even though the secret does not exist, redirect_cleartext_from
should still take effect.
XXX This file is annoying. RedirectTestsWithProxyProto and RedirectTestsInvalidSecret used to be subclasses of RedirectTests, which makes a certain amount of sense. Problem is that when I wanted to modify just RedirectTests to have secrets defined, that ended up affecting the two subclasses in bad ways. There's basically no way to subclass an AmbassadorTest without having your base class be run separately, which isn't what I wanted here. Sigh. only check https urls since test readiness will only end up barfing on redirect Use self here, not self.target, because we want the TLS module to be annotated on the Ambassador itself. [0] [1] -- PHASE 2 For query 0, check the redirection target. For query 1, we require no errors. XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response. only check https urls since test readiness will only end up barfing on redirect TODO (concaf): FWIW, this query only covers one side of the story. This tests that this is the correct deviation from the normal behavior (301 response), but does not test a 301 when proxy proto is actually sent. This is because net/http does not yet support adding proxy proto to HTTP requests, and hence it's difficult to test with kat. We will need to open a raw TCP connection (e.g. telnet/nc) and send the entire HTTP Request in plaintext to test this behavior (or use curl with --haproxy-protocol). We can't do the error check until we have the PROXY client mentioned above. [1] -- PHASE 2 yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2) def check(self): We don't have to check anything about query 0, the "expected" clause is enough. For query 1, we require no errors. XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response. errors = self.results[1].json assert(len(errors) == 0) only check https urls since test readiness will only end up barfing on redirect [0] There's kind of no way to do this. 
Looks like we need to speak HTTP to the port on which we think the server is listening for HTTPS? This is a bad config all the way around, really. [1] -- PHASE 2 yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors", scheme="https"), phase=2) def check(self): We don't have to check anything about query 0, the "expected" clause is enough. For query 1, we require no errors. XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response. errors = self.results[1].json assert(len(errors) == 0) [0] [1] [2] -- PHASE 2 For query 0, check the redirection target. For query 1, we don't have to check anything, the "expected" clause is enough. For query 2, we require no errors. XXX Ew. If self.results[2].json is empty, the harness won't convert it to a response. We're replacing super()'s requirements deliberately here: we need the XFP header or they can't work. | 3,135 | en | 0.878628 |
#!/usr/bin/env python3
# Copyright (C) 2019 - Virtual Open Systems SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author = Teodora Sechkova
# author_email = teodora@virtualopensystems.com
import bjointsp.api.placement as placement
# Start the placement server
def main():
    """Run the placement REST API on localhost:3800 (blocking call)."""
    # NOTE(review): debug=True enables the development debugger/auto-reload;
    # presumably development-only -- confirm before deploying.
    placement.api.app.run(host='localhost', port=3800, debug=True)
if __name__ == '__main__':
    main()
| src/bjointsp/main.py | 909 | !/usr/bin/env python3 Copyright (C) 2019 - Virtual Open Systems SAS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. author = Teodora Sechkova author_email = teodora@virtualopensystems.com Start the placement server | 708 | en | 0.808647 |
"""
A simple Line class.
NOTE: This is NOT rosegraphics -- it is your OWN Line class.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Jacob Jarski.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
import m1t_test_Line as m1t
###############################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
###############################################################################
# -----------------------------------------------------------------------------
# DONE: 2. Right-click on the src folder and
# Mark Directory as ... Sources Root,
# if you have not already done so.
#
# Then, with your instructor, READ THE INSTRUCTIONS in file
# m0_INSTRUCTIONS.txt
# asking questions as needed. Once you understand the instructions,
# mark this _TODO_ as DONE.
# -----------------------------------------------------------------------------
###############################################################################
# NOTE: For ALL of the methods that you implement, the method is allowed
# to have additional side effects as needed by it and/or other methods.
###############################################################################
def main():
    """
    Calls the TEST functions in this module, but ONLY if the method
    to be tested has at least a partial implementation. That is,
    a TEST function will not be called until you begin work
    on the code that it is testing.
    """
    # (method name, its TEST function), in the order they should run.
    tests = (
        ('__init__', run_test_init),
        ('clone', run_test_clone),
        ('reverse', run_test_reverse),
        ('slope', run_test_slope),
        ('length', run_test_length),
        ('get_number_of_clones', run_test_get_number_of_clones),
        ('line_plus', run_test_line_plus),
        ('line_minus', run_test_line_minus),
        ('midpoint', run_test_midpoint),
        ('is_parallel', run_test_is_parallel),
        ('reset', run_test_reset),
    )
    for method_name, run_test in tests:
        if m1t.is_implemented(method_name):
            run_test()
###############################################################################
# Students:
# Do NOT touch the following Point class - it has no TO DO.
# Do NOT copy code from the methods in this Point class.
#
# DO ** READ ** this Point class,
# asking questions about any of it that you do not understand.
#
# DO ** CALL ** methods in this Point class as needed
# in implementing and testing the methods of the ** Line ** class.
#
# IMPORTANT, IMPORTANT, IMPORTANT:
# *** In your ** Line ** class methods, you should NEVER have code
# *** that a ** Point ** class method could do for you.
###############################################################################
# The Point class (and its methods) begins here.
###############################################################################
class Point(object):
    """ Represents a point in 2-dimensional space. """
    # Reference implementation supplied by the instructors. Line methods
    # should CALL these methods rather than re-deriving their arithmetic.
    def __init__(self, x, y):
        """ Sets instance variables x and y to the given coordinates. """
        self.x = x  # x-coordinate
        self.y = y  # y-coordinate
    def __repr__(self):
        """
        Returns a string representation of this Point.
        For each coordinate (x and y), the representation:
          - Uses no decimal points if the number is close to an integer,
          - Else it uses 2 decimal places after the decimal point.
        Examples:
           Point(10, 3.14)
           Point(3.01, 2.99)
        """
        decimal_places = 2  # Use 2 places after the decimal point
        formats = []
        numbers = []
        for coordinate in (self.x, self.y):
            # "Close to an integer" means within 10 ** -decimal_places.
            if abs(coordinate - round(coordinate)) < (10 ** -decimal_places):
                # Treat it as an integer:
                formats.append('{}')
                numbers.append(round(coordinate))
            else:
                # Treat it as a float to decimal_places decimal places:
                formats.append('{:.' + str(decimal_places) + 'f}')
                numbers.append(round(coordinate, decimal_places))
        format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')'
        return format_string.format(numbers[0], numbers[1])
    def __eq__(self, p2):
        """
        Defines == for Points:  a == b   is equivalent to  a.__eq__(b).
        Treats two numbers as "equal" if they are within 6 decimal
        places of each other for both x and y coordinates.
        """
        return (round(self.x, 6) == round(p2.x, 6) and
                round(self.y, 6) == round(p2.y, 6))
    def clone(self):
        """ Returns a new Point at the same (x, y) as this Point. """
        return Point(self.x, self.y)
    def distance_from(self, p2):
        """ Returns the distance this Point is from the given Point. """
        # Pythagorean theorem on the coordinate differences.
        dx_squared = (self.x - p2.x) ** 2
        dy_squared = (self.y - p2.y) ** 2
        return math.sqrt(dx_squared + dy_squared)
    def halfway_to(self, p2):
        """
        Given another Point object p2, returns a new Point
        that is half-way between this Point and the given Point (p2).
        """
        return Point((self.x + p2.x) / 2,
                     (self.y + p2.y) / 2)
    def plus(self, p2):
        """
        Returns a Point whose coordinates are those of this Point
        PLUS the given Point.  For example:
            p1 = Point(500, 20)
            p2 = Point(100, 13)
            p3 = p1.plus(p2)
            print(p3)
        would print:   Point(600, 33)
        """
        return Point(self.x + p2.x, self.y + p2.y)
    def minus(self, p2):
        """
        Returns a Point whose coordinates are those of this Point
        MINUS the given Point.  For example:
            p1 = Point(500, 20)
            p2 = Point(100, 13)
            p3 = p1.minus(p2)
            print(p3)
        would print:   Point(400, 7)
        """
        return Point(self.x - p2.x, self.y - p2.y)
###############################################################################
# The Line class (and its methods) begins here.
###############################################################################
class Line(object):
""" Represents a line segment in 2-dimensional space. """
def __init__(self, start, end):
self.start = start.clone()
self.originalstart = start.clone()
self.end = end.clone()
self.originalend = end.clone()
self.timescloned = 0
"""
What comes in:
-- self
-- a Point object named start
-- a Point object named end
where the two Points are to be the initial start and end points,
respectively, of this Line.
What goes out: Nothing (i.e., None).
Side effects: MUTATEs this Line by setting two instance
variables named:
-- start
-- end
to CLONES of the two Point arguments, respectively.
Other methods must maintain those instance variables as needed
so that they always indicate the CURRENT start and end points
of this Line.
Also, initializes other instance variables as needed
by other Line methods.
Example: This __init__ method runs when one constructs
a Line. So the 3rd of the following statements
invokes the __init__ method of this Line class:
p1 = Point(30, 17)
p2 = Point(50, 80)
line = Line(p1, p2) # Causes __init__ to run
print(line.start) # Should print Point(30, 17)
print(line.end) # Should print Point(50, 80)
print(line.start == p1) # Should print True
print(line.start is p1) # Should print False
Type hints:
:type start: Point
:type end: Point
"""
# ---------------------------------------------------------------------
# DONE: 3.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
    def __repr__(self):
        """
        What comes in:
          -- self
        What goes out: Returns a string representation of this Line,
           in the form:
               Line[(x1, y1), (x2, y2)]
        Side effects: None.
        Note: print(BLAH) causes BLAH's __repr__ to be called.
              BLAH's __repr__ returns a string,
              which the print function then prints.
        Example: Since the print function calls __repr__ on the
           object to be printed:
              line = Line(Point(30, 17), Point(50, 80))
              print(line)   # prints: Line[(30, 17), (50, 80)]
        Type hints:
          :rtype: str
        """
        # ---------------------------------------------------------------------
        # We have already implemented this __repr__ function for you.
        # Do NOT modify it.
        # ---------------------------------------------------------------------
        # Reuse Point.__repr__ for each endpoint, then strip the class name
        # so only the "(x, y)" part remains.
        start = repr(self.start).replace('Point', '')
        end = repr(self.end).replace('Point', '')
        return 'Line[{}, {}]'.format(start, end)
    def __eq__(self, line2):
        """
        What comes in:
          -- self
          -- a Line object
        What goes out: Returns True if:
          this Line's start point is equal to line2's start point AND
          this Line's end point is equal to line2's end point.
          Returns False otherwise.
        Side effects: None.
        Note:  a == b   is equivalent to  a.__eq__(b).
        Examples:
            p1 = Point(30, 17)
            p2 = Point(50, 80)
            line1 = Line(p1, p2)
            line2 = Line(p1, p2)
            line3 = Line(p2, p1)
            print(line1 == line1)   # True
            print(line1 == line2)   # True
            print(line1 == line3)   # False (direction matters)
        Type hints:
          :type line2: Line
          :rtype: bool
        """
        # ---------------------------------------------------------------------
        # We have already implemented this __eq__ function for you.
        # Do NOT modify it.
        # ---------------------------------------------------------------------
        # Delegates the per-endpoint comparison to Point.__eq__ (which
        # compares coordinates to 6 decimal places).
        return (self.start == line2.start) and (self.end == line2.end)
def clone(self):
self.timescloned = self.timescloned + 1
clone = Line(self.start, self.end)
return clone
"""
What comes in:
-- self
What goes out: Returns a new Line whose START is a clone of
this Line's START and whose END is a clone of this Line's END.
Side effects: None.
Example:
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = line1.clone()
print(line1) # Should print: Line[(30, 17), (50, 80)]
print(line2) # Should print: Line[(30, 17), (50, 80)]
print(line1 == line2) # Should print: True
print(line1 is line2) # Should print: False
print(line1.start is line2.start) # Should print: False
print(line1.end is line2.end) # Should print: False
line1.start = Point(11, 12)
print(line1) # Should print: Line[(11, 12), (50, 80)]
print(line2) # Should print: Line[(30, 17), (50, 80)]
print(line1 == line2) # Should now print: False
Type hints:
:rtype: Line
"""
# ---------------------------------------------------------------------
# DONE: 4.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def reverse(self):
    """
    What comes in:
      -- self
    What goes out: Nothing (i.e., None).
    Side effects: MUTATES this Line so that its direction is reversed
      (that is, its start and end points are swapped).
      ** Must NOT mutate its start and end points -- just SWAP them. **
    Example:
      line1 = Line(Point(30, 17), Point(50, 80))
      line1.reverse()
      print(line1)  # Line[(50, 80), (30, 17)]
    """
    # BUG FIX: the specification used to sit AFTER the code (an
    # unreachable bare string); it is now a real docstring.
    # Tuple swap exchanges the endpoint REFERENCES without mutating
    # either Point, as the spec requires.
    self.start, self.end = self.end, self.start
def slope(self):
    """
    What comes in:
      -- self
    What goes out: Returns the slope of this Line, or
        math.inf
      if the line is vertical (i.e., has "infinite" slope).
    Side effects: None.
    Examples:
      Line(Point(30, 3), Point(50, 8)).slope()   # 0.25  (5 / 20)
      Line(Point(10, 10), Point(10, 5)).slope()  # math.inf (vertical)
    Type hints:
      :rtype: float
    """
    # BUG FIX: the specification used to sit AFTER the code (an
    # unreachable bare string); it is now a real docstring.
    run = self.end.x - self.start.x
    rise = self.end.y - self.start.y
    if run == 0:
        # Vertical line: slope is "infinite" by the spec.
        return math.inf
    return rise / run
def length(self):
    """
    What comes in:
      -- self
    What goes out: Returns the length of this Line (the Euclidean
      distance between its start and end points).
    Side effects: None.
    Examples:
      Line(Point(166, 10), Point(100, 10)).length()  # 66.0
      Line(Point(0, 0), Point(3, 4)).length()        # about 5.0
    Type hints:
      :rtype: float
    """
    # BUG FIX: the specification used to sit AFTER the code (an
    # unreachable bare string); it is now a real docstring.
    # math.hypot(dx, dy) is sqrt(dx*dx + dy*dy), computed accurately.
    return math.hypot(self.start.x - self.end.x,
                      self.start.y - self.end.y)
def get_number_of_clones(self):
    """
    What comes in:
      -- self
    What goes out: Returns the number of times that this Line has
      been cloned (via the clone method).
    Side effects: None.
    Example:
      line1 = Line(Point(500, 20), Point(100, 8))
      line2 = line1.clone()
      line3 = line1.clone()
      line1.get_number_of_clones()  # 2
      line2.get_number_of_clones()  # 0
    Type hints:
      :rtype: int
    """
    # BUG FIX: the specification used to sit AFTER the return (an
    # unreachable bare string); it is now a real docstring.
    # NOTE(review): assumes Line.__init__ initializes
    # self.timescloned to 0 — confirm (not visible in this chunk).
    return self.timescloned
def line_plus(self, other_line):
    """
    What comes in:
      -- self
      -- another Line object
    What goes out: Returns a new Line whose start is the coordinate
      sum of the two Lines' start Points and whose end is the
      coordinate sum of the two Lines' end Points.
    Side effects: None.
    Example:
      line1 = Line(Point(500, 20), Point(100, 8))
      line2 = Line(Point(100, 13), Point(400, 8))
      line1.line_plus(line2)  # prints as: Line[(600, 33), (500, 16)]
    Type hints:
      :type other_line: Line
      :rtype: Line
    """
    summed_start = Point(self.start.x + other_line.start.x,
                         self.start.y + other_line.start.y)
    summed_end = Point(self.end.x + other_line.end.x,
                       self.end.y + other_line.end.y)
    return Line(summed_start, summed_end)
def line_minus(self, other_line):
    """
    What comes in:
      -- self
      -- another Line object
    What goes out: Returns a new Line whose start is this Line's
      start Point minus the other_line's start Point, and whose end
      is this Line's end Point minus the other_line's end Point.
    Side effects: None.
    Example:
      line1 = Line(Point(500, 20), Point(100, 8))
      line2 = Line(Point(100, 13), Point(400, 8))
      line1.line_minus(line2)  # prints as: Line[(400, 7), (-300, 0)]
    Type hints:
      :type other_line: Line
      :rtype: Line
    """
    differenced_start = Point(self.start.x - other_line.start.x,
                              self.start.y - other_line.start.y)
    differenced_end = Point(self.end.x - other_line.end.x,
                            self.end.y - other_line.end.y)
    return Line(differenced_start, differenced_end)
def midpoint(self):
    """
    What comes in:
      -- self
    What goes out: Returns a new Point at the midpoint of this Line.
    Side effects: None.
    Example:
      Line(Point(3, 10), Point(9, 20)).midpoint()
      # returns a Point that prints as: Point(6, 15)
    Type hints:
      :rtype: Point
    """
    half_x = (self.start.x + self.end.x) / 2
    half_y = (self.start.y + self.end.y) / 2
    return Point(half_x, half_y)
def is_parallel(self, line2):
    """
    What comes in:
      -- self
      -- another Line object (line2)
    What goes out: Returns True if this Line is parallel to the
      given Line (line2). Returns False otherwise.
    Side effects: None.
    Examples:
      line1 = Line(Point(15, 30), Point(17, 50))  # slope is 10.0
      line2 = Line(Point(10, 10), Point(15, 60))  # slope is 10.0
      line4 = Line(Point(10, 10), Point(10, 20))  # vertical
      line1.is_parallel(line2)  # True
      line1.is_parallel(line4)  # False
      line4.is_parallel(line4)  # True
    Type hints:
      :type line2: Line
      :rtype: bool
    """
    my_run = self.end.x - self.start.x
    other_run = line2.end.x - line2.start.x
    # A vertical line (run == 0) is parallel only to another
    # vertical line; this also avoids dividing by zero below.
    if my_run == 0 or other_run == 0:
        return my_run == other_run
    my_slope = (self.end.y - self.start.y) / my_run
    other_slope = (line2.end.y - line2.start.y) / other_run
    # Floating-point arithmetic is imprecise (e.g. 1/(24*pi - 20*pi)
    # and 3/(72*pi - 60*pi) differ in the last digit), so slopes are
    # compared rounded rather than with ==.
    # BUG FIX: the exercise's guidance mandates rounding to 12
    # decimal places; this previously rounded to only 10.
    return round(my_slope, 12) == round(other_slope, 12)
def reset(self):
    """
    What comes in:
      -- self
    What goes out: Nothing (i.e., None).
    Side effects: MUTATES this Line so that its start and end points
      revert to what they were when this Line was constructed.
    Example:
      line1 = Line(Point(-3, -4), Point(3, 4))
      line1.start = Point(100, 300)
      line1.reverse()
      line1.reset()
      print(line1)  # Line[(-3, -4), (3, 4)]
    """
    # BUG FIX: the specification used to sit AFTER the code (an
    # unreachable bare string); it is now a real docstring.
    # NOTE(review): assumes Line.__init__ stored the construction-time
    # endpoints in self.originalstart / self.originalend — confirm
    # (Line.__init__ is not visible in this chunk).
    self.start = self.originalstart
    self.end = self.originalend
###############################################################################
# The TEST functions for the Line class begin here.
#
# We have already written the TEST functions. They all take the form:
# -- m1t.run_test_BLAH() # This runs OUR tests.
# -- One more test (or set of tests) that came directly from the Example
# in the specification.
###############################################################################
def run_test_init():
    """ Tests the __init__ method of the Line class. """
    m1t.run_test_init()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    start_point = Point(30, 17)
    end_point = Point(50, 80)
    test_line = Line(start_point, end_point)  # Causes __init__ to run
    print(test_line.start)                 # Should print Point(30, 17)
    print(test_line.end)                   # Should print Point(50, 80)
    print(test_line.start == start_point)  # Should print True
    print(test_line.start is start_point)  # Should print False
    print('The above should print:')
    for expected in (' Point(30, 17)',
                     ' Point(50, 80)',
                     ' True',
                     ' False'):
        print(expected)
def run_test_clone():
    """ Tests the clone method of the Line class. """
    m1t.run_test_clone()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line1 = Line(p1, p2)
    line2 = line1.clone()
    print(line1)           # Should print: Line[(30, 17), (50, 80)]
    print(line2)           # Should print: Line[(30, 17), (50, 80)]
    print(line1 == line2)  # Should print: True
    print(line1 is line2)  # Should print: False
    print(line1.start is line2.start)  # Should print: False
    print(line1.end is line2.end)      # Should print: False
    line1.start = Point(11, 12)
    print(line1)           # Should print: Line[(11, 12), (50, 80)]
    print(line2)           # Should print: Line[(30, 17), (50, 80)]
    print(line1 == line2)  # Should now print: False
    print('The above should print:')
    print(' Line[(30, 17), (50, 80)]')
    print(' Line[(30, 17), (50, 80)]')
    print(' True')
    print(' False')
    print(' False')
    print(' False')
    print(' Line[(11, 12), (50, 80)]')
    print(' Line[(30, 17), (50, 80)]')  # BUG FIX: was missing the closing ']'
    print(' False')
def run_test_reverse():
    """ Tests the reverse method of the Line class. """
    m1t.run_test_reverse()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line1 = Line(p1, p2)
    line2 = line1.clone()
    print(line1)           # Should print: Line[(30, 17), (50, 80)]
    line1.reverse()
    print(line1)           # Should print: Line[(50, 80), (30, 17)]
    print(line1 == line2)  # Should print: False
    line1.reverse()
    print(line1 == line2)  # Should now print: True
    print('The above should print:')
    print(' Line[(30, 17), (50, 80)]')
    print(' Line[(50, 80), (30, 17)]')  # BUG FIX: was missing the closing ']'
    print(' False')
    print(' True')
def run_test_slope():
    """ Tests the slope method of the Line class. """
    m1t.run_test_slope()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    rising_line = Line(Point(30, 3), Point(50, 8))
    # The slope is (8 - 3) / (50 - 30), which is 0.25:
    print(rising_line.slope())  # Should print [approximately]: 0.25
    vertical_line = Line(Point(10, 10), Point(10, 5))
    print(vertical_line.slope())  # Should print: inf
    # math.inf is NOT the STRING 'inf', so:
    print(vertical_line.slope() == 'inf')  # Should print False
    print('The above should print:')
    for expected in (' 0.25 (approximately)',
                     ' inf',
                     ' False'):
        print(expected)
def run_test_length():
    """ Tests the length method of the Line class. """
    m1t.run_test_length()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    horizontal = Line(Point(166, 10), Point(100, 10))
    # The distance from (166, 10) to (100, 10) is 66:
    print(horizontal.length())  # Should print: 66.0
    triangle_side = Line(Point(0, 0), Point(3, 4))
    print(triangle_side.length())  # Should print about 5.0
    print('The above should print:')
    for expected in (' 66.0',
                     ' 5.0 (approximately)'):
        print(expected)
def run_test_get_number_of_clones():
    """ Tests the get_number_of_clones method of the Line class. """
    m1t.run_test_get_number_of_clones()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    original = Line(Point(500, 20), Point(100, 8))
    first_copy = original.clone()     # original's clone count -> 1
    second_copy = original.clone()    # original's clone count -> 2
    copy_of_copy = second_copy.clone()  # second_copy's count -> 1
    third_copy = original.clone()     # original's clone count -> 3
    for each_line in (original, first_copy, second_copy,
                      copy_of_copy, third_copy):
        print(each_line.get_number_of_clones())
    print('The above should print 3, then 0, then 1, then 0, then 0.')
def run_test_line_plus():
    """ Tests the line_plus method of the Line class. """
    m1t.run_test_line_plus()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    first = Line(Point(500, 20), Point(100, 8))
    second = Line(Point(100, 13), Point(400, 8))
    summed = first.line_plus(second)
    print(summed)
    print('The above should print: Line[(600, 33), (500, 16)]')
def run_test_line_minus():
    """ Tests the line_minus method of the Line class. """
    m1t.run_test_line_minus()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    first = Line(Point(500, 20), Point(100, 8))
    second = Line(Point(100, 13), Point(400, 8))
    differenced = first.line_minus(second)
    print(differenced)
    print('The above should print: Line[(400, 7), (-300, 0)]')
def run_test_midpoint():
    """ Tests the midpoint method of the Line class. """
    m1t.run_test_midpoint()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    start_point = Point(3, 10)
    end_point = Point(9, 20)
    segment = Line(start_point, end_point)
    print(segment.midpoint())  # Should print: Point(6, 15)
    print('The above should print: Point(6, 15)')
def run_test_is_parallel():
    """ Tests the is_parallel method of the Line class. """
    m1t.run_test_is_parallel()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    steep_a = Line(Point(15, 30), Point(17, 50))   # slope is 10.0
    steep_b = Line(Point(10, 10), Point(15, 60))   # slope is 10.0
    diagonal = Line(Point(10, 10), Point(80, 80))  # a different slope
    vertical = Line(Point(10, 10), Point(10, 20))  # slope is inf
    print(steep_a.is_parallel(steep_b))    # Should print: True
    print(steep_b.is_parallel(steep_a))    # Should print: True
    print(steep_a.is_parallel(diagonal))   # Should print: False
    print(steep_a.is_parallel(vertical))   # Should print: False
    print(steep_a.is_parallel(steep_a))    # Should print: True
    print(vertical.is_parallel(vertical))  # Should print: True
    print('The above should print:')
    print(' True, True, False, False, True, True')
def run_test_reset():
    """ Tests the reset method of the Line class. """
    m1t.run_test_reset()  # This runs OUR tests.
    # -------------------------------------------------------------------------
    # One ADDITIONAL test (or set of tests), from the doc-string Example.
    # -------------------------------------------------------------------------
    first = Line(Point(-3, -4), Point(3, 4))
    second = Line(Point(0, 1), Point(10, 20))
    first.start = Point(100, 300)
    second.end = Point(99, 4)
    first.reverse()
    # Prints Line[(x1, y1), (x2, y2)] where (x1, y1) and (x2, y2)
    # are the CURRENT coordinates of each line's endpoints:
    print(first)
    print(second)
    first.reset()
    second.reset()
    print(first)   # Should print: Line[(-3, -4), (3, 4)]
    print(second)  # Should print: Line[(0, 1), (10, 20)]
    print('The above should print:')
    for expected in (' Line[(3, 4), (100, 300)]',
                     ' Line[(0, 1), (99, 4)]',
                     ' Line[(-3, -4), (3, 4)]',
                     ' Line[(0, 1), (10, 20)]'):
        print(expected)
# -----------------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# It is necessary here to enable the automatic testing in m1t_test_Line.py.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    main()  # 'main' is presumably defined earlier in this file — confirm.
| src/m1_Line.py | 38,309 | Represents a line segment in 2-dimensional space.
Represents a point in 2-dimensional space.
Defines == for Points: a == b is equivalent to a.__eq__(b).
Treats two numbers as "equal" if they are within 6 decimal
places of each other for both x and y coordinates.
What comes in:
-- self
-- a Line object
What goes out: Returns True if:
this Line's start point is equal to line2's start point AND
this Line's end point is equal to line2's end point.
Returns False otherwise.
Side effects: None.
Note: a == b is equivalent to a.__eq__(b).
Examples:
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = Line(p1, p2)
line3 = Line(p2, p1)
print(line1 == line1) # Should print: True
print(line1 == line2) # Should print: True
print(line1 == line3) # Should print: False
line1.start = Point(0, 0)
print(line1 == line2) # Should now print: False
Type hints:
:type line2: Line
:rtype: bool
Sets instance variables x and y to the given coordinates.
Returns a string representation of this Point.
For each coordinate (x and y), the representation:
- Uses no decimal points if the number is close to an integer,
- Else it uses 2 decimal places after the decimal point.
Examples:
Point(10, 3.14)
Point(3.01, 2.99)
What comes in:
-- self
What goes out: Returns a string representation of this Line,
in the form:
Line[(x1, y1), (x2, y2)]
Side effects: None.
Note: print(BLAH) causes BLAH's __repr__ to be called.
BLAH's __repr__ returns a string,
which the print function then prints.
Example: Since the print function calls __repr__ on the
object to be printed:
p1 = Point(30, 17)
p2 = Point(50, 80)
line = Line(p1, p2) # Causes __init__ to run
# The following statement causes __repr__ to run,
# hence should print: Line[(30, 17), (50, 80)]
print(line)
Type hints:
:rtype: str
Returns a new Point at the same (x, y) as this Point.
Returns the distance this Point is from the given Point.
Given another Point object p2, returns a new Point
that is half-way between this Point and the given Point (p2).
What comes in:
-- self
-- another Line object (line2)
What goes out: Returns True if this Line is parallel to the
given Line (line2). Returns False otherwise.
*** SEE THE IMPORTANT NOTE BELOW, re ROUNDING numbers.
Side effects: None.
Examples:
line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0
line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0
line3 = Line(Point(10, 10), Point(80, 80)) # slope is 7.0
line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf
print(line1.is_parallel(line2)) # Should print: True
print(line2.is_parallel(line1)) # Should print: True
print(line1.is_parallel(line3)) # Should print: False
print(line1.is_parallel(line4)) # Should print: False
print(line1.is_parallel(line1)) # Should print: True
print(line4.is_parallel(line4)) # Should print: True
Type hints:
:type line2: Line
:rtype: bool
What comes in:
-- self
-- another Line object
What goes out:
-- Returns a Line whose:
-- start is this Line's start (a Point)
minus the other_line's start (another Point).
-- end is this Line's end (a Point)
minus the other_line's end (another Point).
Side effects: None.
Example:
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_minus(line2)
print(line3)
would print: Line[(400, 7), (-300, 0)]
Type hints:
:type other_line: Line
:rtype: Line:
What comes in:
-- self
-- another Line object
What goes out:
-- Returns a Line whose:
-- start is the sum of this Line's start (a Point)
and the other_line's start (another Point).
-- end is the sum of this Line's end (a Point)
and the other_line's end (another Point).
Side effects: None.
Example:
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_plus(line2)
print(line3)
would print: Line[(600, 33), (500, 16)]
Type hints:
:type other_line: Line
:rtype: Line:
Calls the TEST functions in this module, but ONLY if the method
to be tested has at least a partial implementation. That is,
a TEST function will not be called until you begin work
on the code that it is testing.
What comes in:
-- self
What goes out: returns a Point at the midpoint of this Line.
Side effects: None.
Example:
p1 = Point(3, 10)
p2 = Point(9, 20)
line1 = Line(p1, p2)
print(line1.midpoint()) # Should print: Point(6, 15)
Type hints:
:rtype: Point
Returns a Point whose coordinates are those of this Point
MINUS the given Point. For example:
p1 = Point(500, 20)
p2 = Point(100, 13)
p3 = p1.minus(p2)
print(p3)
would print: Point(400, 7)
Returns a Point whose coordinates are those of this Point
PLUS the given Point. For example:
p1 = Point(500, 20)
p2 = Point(100, 13)
p3 = p1.plus(p2)
print(p3)
would print: Point(600, 33)
Tests the clone method of the Line class.
Tests the get_number_of_clones method of the Line class.
Tests the __init__ method of the Line class.
Tests the is_parallel method of the Line class.
Tests the length method of the Line class.
Tests the line_minus method of the Line class.
Tests the line_plus method of the Line class.
Tests the midpoint method of the Line class.
Tests the reset method of the Line class.
Tests the reverse method of the Line class.
Tests the slope method of the Line class.
A simple Line class.
NOTE: This is NOT rosegraphics -- it is your OWN Line class.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Jacob Jarski.
DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. IMPORTANT: Your instructor will help you get started on this exercise. ----------------------------------------------------------------------------- DONE: 2. Right-click on the src folder and Mark Directory as ... Sources Root, if you have not already done so. Then, with your instructor, READ THE INSTRUCTIONS in file m0_INSTRUCTIONS.txt asking questions as needed. Once you understand the instructions, mark this _TODO_ as DONE. ----------------------------------------------------------------------------- NOTE: For ALL of the methods that you implement, the method is allowed to have additional side effects as needed by it and/or other methods. Students: Do NOT touch the following Point class - it has no TO DO. Do NOT copy code from the methods in this Point class. DO ** READ ** this Point class, asking questions about any of it that you do not understand. DO ** CALL ** methods in this Point class as needed in implementing and testing the methods of the ** Line ** class. IMPORTANT, IMPORTANT, IMPORTANT: *** In your ** Line ** class methods, you should NEVER have code *** that a ** Point ** class method could do for you. The Point class (and its methods) begins here. Use 2 places after the decimal point Treat it as an integer: Treat it as a float to decimal_places decimal places: The Line class (and its methods) begins here. --------------------------------------------------------------------- DONE: 3. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- We have already implemented this __repr__ function for you. Do NOT modify it. 
--------------------------------------------------------------------- --------------------------------------------------------------------- We have already implemented this __eq__ function for you. Do NOT modify it. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 4. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 5. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 6. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 7. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. 
--------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 8. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 9. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 10. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 11. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- --------------------------------------------------------------------- DONE: 12. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. 
The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- IMPORTANT: When you test whether two FLOATING POINT numbers are "equal", you must deal with the imprecision of floating-point arithmetic. For example, in REAL arithmetic, 1 / (24 * math.pi - 20 * math.pi) and 3 / (72 * math.pi - 60 * math.pi) are equal. But in FLOATING point arithmetic, they are: 0.07957747154594767 and 0.07957747154594765 respectively (hence NOT equal). Try it out if you don't believe me! IMPORTANT BOTTOM-LINE: When you want to test whether two FLOATING POINT numbers a and b are the same, as in this method, DON'T use: a == b INSTEAD use: round(a, 12) == round(b, 12) The latter compares the numbers rounded to 12 decimal places. In the context of this exercise, doing so is adequate to ignore floating-point errors while distinguishing numbers that really are different from each other. --------------------------------------------------------------------- DONE: 13. a. READ the above specification, including the Example. ** ASK QUESTIONS AS NEEDED. ** ** Be sure you understand it, ESPECIALLY the Example. b. Implement and test this method. The tests are already written (below). They include the Example in the above doc-string. --------------------------------------------------------------------- The TEST functions for the Line class begin here. We have already written the TEST functions. They all take the form: -- m1t.run_test_BLAH() This runs OUR tests. -- One more test (or set of tests) that came directly from the Example in the specification. This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Causes __init__ to run Should print Point(30, 17) Should print Point(50, 80) Should print True Should print False This runs OUR tests. 
------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Should print: Line[(30, 17), (50, 80)] Should print: Line[(30, 17), (50, 80)] Should print: True Should print: False Should print: False Should print: False Should print: Line[(11, 12), (50, 80)] Should print: Line[(30, 17), (50, 80)] Should now print: False This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Should print: Line[(30, 17), (50, 80)] Should print: Line[(50, 80), (30, 17)] Should print: False Should now print: True This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Since the slope is (8 - 3) / (50 - 30) , which is 0.25: Should print [approximately]: 0.25 Should print: inf math.inf is NOT the STRING 'inf', so: Should print False This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Since the distance from p1 to p2 is 66: Should print: 66.0 Should print about 5.0 This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). 
------------------------------------------------------------------------- This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Should print: Point(6, 15) This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- slope is 10.0 slope is 10.0 slope is 7.0 slope is inf Should print: True Should print: True Should print: False Should print: False Should print: True Should print: True This runs OUR tests. ------------------------------------------------------------------------- One ADDITIONAL test (or set of tests). ------------------------------------------------------------------------- Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and (x2, y2) are the CURRENT coordinates of line1's endpoints. Similarly for line2 Should print: Line[(-3, -4), (3, 4)] Should print: Line[(0, 1), (10, 20)] ----------------------------------------------------------------------------- If this module is running at the top level (as opposed to being imported by another module), then call the 'main' function. It is necessary here to enable the automatic testing in m1t_test_Line.py. ----------------------------------------------------------------------------- | 17,728 | en | 0.743491 |
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor

# XPath selectors used to extract each product field from a crawled page.
# An empty string means the field is not extracted for this site.
XPATH = {
    'name': "//div[@class='product-name']/h1",
    'price': "//p[@class='special-price']/span[@class='price']|//span[@class='regular-price']/span[@class='price']",
    'category': "//div[@class='breadcrumbs']/ul/li/a",
    'description': "//div[@class='box-collateral box-description']/div[@id='details-area']",
    'images': "//p[@class='product-image']/a/@href",
    'canonical': "",
    'base_url': "",
    'brand': "",
    'in_stock': "",
    'guarantee': "",
    'promotion': ""
}
name = 'azora.vn'
allowed_domains = ['azora.vn']
start_urls = ['http://azora.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
    # Raw string for the regex: '\.', '\?' and '\d' are invalid string
    # escapes and trigger a DeprecationWarning/SyntaxWarning on modern
    # Python, even though the resulting pattern text is identical.
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+\.html($|\?p=\d+$)']),
         'parse_item_and_links'),
]
| scraper/storage_spiders/azoravn.py | 1,029 | Auto generated by generator.py. Delete this line if you make modification.Rule(LinkExtractor(), 'parse_item'),Rule(LinkExtractor(), 'parse'), | 141 | en | 0.518468 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import re
import base64
import json
import os
import tempfile
import requests
import urllib3
from kubernetes_py.utils.ConvertData import convert
from six.moves.urllib.parse import urlencode
# Matches an https:// URL whose host is a dotted-quad IPv4 address (each
# octet constrained to 0-255).  Used by HttpRequest to detect IP hosts for
# which TLS verification is disabled (Python 2.7 / urllib3 workaround).
RE_VALID_SSL_IP = re.compile(
    r'^https://(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])')
class HttpRequest:
    """
    Thin wrapper over ``requests.request`` used to call the Kubernetes API.

    Connection parameters are collected by the constructor; ``send()``
    performs the request and returns a plain ``dict`` summarizing the
    outcome (``success``, ``reason``, ``status``, ``data``).
    """

    def __init__(self, method='GET', host='localhost:80', url='/', data=None, auth=None,
                 cert=None, ca_cert=None, ca_cert_data=None, token=None):
        self.http_method = method         # HTTP verb: GET/POST/PUT/PATCH/...
        self.http_host = host             # scheme + host (+ port)
        self.url = url                    # path; rewritten to a full URL by send()
        self.data = data                  # query params for GET, JSON body otherwise
        self.auth = auth                  # forwarded to requests' `auth=`
        self.cert = cert                  # client certificate for mutual TLS
        self.ca_cert = ca_cert            # CA bundle path for server verification
        self.ca_cert_data = ca_cert_data  # base64-encoded CA cert, written to a temp file
        self.token = token                # bearer token for the Authorization header

    def _build_headers(self):
        """Assemble the HTTP headers implied by the configured method/token."""
        headers = dict()
        headers['Accept'] = 'application/json'
        if self.http_method in ['PUT', 'POST', 'PATCH']:
            headers['Content-type'] = 'application/json'
        if self.token is not None:
            headers['Authorization'] = 'Bearer {token}'.format(token=self.token)
        return headers

    def _resolve_tls_verify(self):
        """
        Determine the ``verify=`` argument for requests.

        Returns a ``(verify, temp_file_name)`` tuple.  ``temp_file_name`` is
        the path of a temporary CA-cert file that the caller must delete, or
        ``None`` when no temp file was created.
        """
        temp_name = None
        verify = False  # NOTE: TLS verification is OFF unless a CA is supplied
        if self.ca_cert is not None:
            verify = self.ca_cert
        if self.ca_cert_data is not None:
            temp = tempfile.NamedTemporaryFile(delete=False)
            temp.write(base64.b64decode(self.ca_cert_data))
            temp.close()
            temp_name = temp.name
            verify = temp_name
        # TODO: TLS issue with Python 2.7 and urllib3 when hostname is an IP
        # address.  A better fix should be found, but for now fall back to
        # unverified HTTPS for dotted-quad hosts.
        if RE_VALID_SSL_IP.search(self.http_host):
            verify = False
            urllib3.disable_warnings()
        return verify, temp_name

    def send(self):
        """
        Execute the configured request.

        Returns a dict with keys ``success`` (bool), ``reason`` (str),
        ``status`` (int HTTP status) and ``data`` (decoded JSON when
        possible, otherwise raw text/bytes).  Exceptions raised by
        ``requests`` propagate unchanged to the caller.
        """
        state = dict(success=False, reason=None, status=None, data=None)
        http_headers = self._build_headers()

        # For GET requests the payload is encoded into the query string.
        if self.data is not None and self.http_method in ['GET']:
            self.url = "{0}?{1}".format(self.url, urlencode(self.data))
        self.url = self.http_host + self.url

        verify, temp_ca_file = self._resolve_tls_verify()
        try:
            response = requests.request(
                method=self.http_method,
                url=self.url,
                auth=self.auth,
                cert=self.cert,
                headers=http_headers,
                # NOTE(review): for GET the payload was already encoded into
                # the URL above, yet it is still serialized into the body —
                # behavior preserved from the original implementation.
                data="" if self.data is None else json.dumps(self.data),
                verify=verify
            )
        finally:
            # Always remove the temporary CA-cert file, even on failure.
            # (The original `except Exception as err: raise err` was a no-op
            # re-raise and has been dropped; try/finally is sufficient.)
            if temp_ca_file is not None:
                os.unlink(temp_ca_file)

        state['status'] = response.status_code
        state['reason'] = response.reason

        # "kubectl logs"-style responses are "text/plain" and may contain
        # bytes that are not valid UTF-8; fall back to raw bytes then.
        try:
            resp_data = response.content.decode('utf-8')
        except UnicodeDecodeError:
            resp_data = response.content

        if len(resp_data) > 0:
            try:
                state['data'] = convert(data=json.loads(resp_data))
            except Exception:
                state['data'] = resp_data

        if 200 <= state['status'] <= 299:
            state['success'] = True
        return state
| kubernetes_py/utils/HttpRequest.py | 3,446 | !/usr/bin/env python -*- coding: utf-8 -*- This file is subject to the terms and conditions defined in file 'LICENSE.md', which is part of this source code package. TODO: TLS issue with Python 2.7 and urllib3 when hostname is an IP address A better fix should be found but I can't think of anything else for now. There was an issue with "kubectl logs" type requests where returned content is "text/plain" and we do have characters of unknown origin. | 449 | en | 0.950389 |
"""
The code below crawls the annotations of the MADE 1.0 Train Data and stores them
as Corpus ID, Annotation ID, Type, Length, Offset, Text in the
CSV_Annotations.csv file.
Input Files:
All xml files in the annotations folder in the made_train_data folder
Output Files:
CSV_Annotations.csv
Note: Make sure to delete the CSV_Annotations.csv file if already existing in
the folder as this code appends to the existing file.
"""
# Importing required Files
import os
import xml.etree.ElementTree as ET
import csv
final =list()
final.append(["Content ID", "Annotation ID", "Type", "Length", "Offset", "Text"])
# Reading required files
path ="C:\\Project_NLP_Final\\Project Dataset\\made_train_data\\annotations\\"
dirListing = os.listdir(path)
for item in dirListing:
tree = ET.parse(path + '\\' + item)
root = tree.getroot()
annot = dict()
for i in root.findall('./document/passage'):
flag = 0
for doc in i.findall('./annotation'):
annot=list()
annot.append(item[0:-9])
annot.append(doc.get('id'))
for typ in doc:
if typ.tag =='infon':
annot.append(typ.text)
elif typ.tag =='location':
annot.append(typ.get('length'))
annot.append(typ.get('offset'))
elif typ.tag == 'text':
annot.append(typ.text)
final.append(annot)
flag = 1
if flag == 0:
annot = [item[0:-9], None, None, None, None, None]
final.append(annot)
# Writing the required files
with open("C:\\Project_NLP_Final\\Project Dataset\\PreProcessing\\Regex\\CSV_Annotations.csv",'a', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter=',',quotechar = '"')
for row in final:
writer.writerow(row) | Code/PreProcessing/Regex/annotation_crawling.py | 1,919 | The code below crawls the annotations of the MADE 1.0 Train Data and stores them
as Corpus ID, Annotation ID, Type, Length, Offset, Text in the
CSV_Annotations.csv file.
Input Files:
All xml files in the annotations folder in the made_train_data folder
Output Files:
CSV_Annotations.csv
Note: Make sure to delete the CSV_Annotations.csv file if already existing in
the folder as this code appends to the existing file.
Importing required Files Reading required files Writing the required files | 506 | en | 0.809458 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
One of the trickier parts of creating a mock btrfs filesystem is tracking
the structures of the write forks, respecting `truncate`, `write`, and
`clone` operations. We achieve this as follows:
- Sequentially apply `btrfs send` operations to create & update:
* `IncompleteInode`s and their `Extent`s,
* the path -> `IncompleteInode` mapping.
- Run `extents_to_chunks_with_clones()` to summarize which files clone
which other files. A quick clarification of the notation:
* `Extent` is actually a tree of extents, which captures the history of
how the file's sequence of extents was created. Refer to `extent.py`.
* `Chunk` more directly corresponds to a filesystem extent. It's either
data or a hole of a given length. A file is just a contiguous sequence
of `Chunk`s. Beyond recording the kind, and the length, each `Chunk`
records precisely how other files clone from it.
So `extents_to_chunks_with_clones()` flattens the history-preserving,
clone-aware tree in `Extent` objects into a test-friendly list of
`Chunk`s.
For testing, it is important to produce a representation that is as
normalized as possible: our output should deterministically and uniquely
capture the information we wish to test, and omit everything else[1].
We do NOT want our output to depend on the order of the operations that
created the filesystem, but only on the final filesystem state.
Specifically:
* For any byte offset[2] in the file, we need to know whether it's a
`HOLE`, or it contains `DATA` (see `Extent.Kind`). An offset -> kind
map is too verbose to use in manual tests, so we merge adjacent
offsets with the same `Extent.Kind` into `Chunk`s.
* For any offset in the file, we need to know whether it is a clone of
any other file locations (i.e. copy-on-write sharing of underlying
storage). For this reason, each `Chunk` has a set of `ChunkClones`,
which form a normalized[3] description of the shared-storage links on
the filesystem.
To give an example -- let's say that columns are byte offsets, and we
have this 10-byte extent, parts of which were cloned to make files
`A`, `B`, and `C`:
0123456789 # offsets on disk
BBBBBAAA # some part of file `B` includes offsets 1-5; `A` -- 6-8
AAACCCCC # `A` ALSO includes 0-2, possibly separated from its 6-8
(Aside: `test_extents_to_chunks_with_clones` also uses such figures)
Reading this figure, we see that:
- A has a 6-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 1 into B at `offset` 0 with length 2, aka `B:0+2@1`
* From `offset` 3 into C at `offset` 3 with length 2, aka `C:3+2@3'
- B has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into A at `offset` 1 with length 2, aka `A:1+2@0`
* From `offset` 2 into C at `offset` 0 with length 3, aka `C:0+3@2'
- C has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into B at `offset` 2 with length 3, aka `B:2+3@0`
* From `offset` 3 into A at `offset` 3 with length 2, aka `A:3+2@3'
You can see that our representation of "a set of `ChunkClone`s for
every `Chunk`" is NOT parsimonious. If the same range of bytes is
cloned into N `Chunk`s, each of those `Chunk`s will refer to every
other `Chunk`, for a total of N*(N-1)/2 references. This is far less
efficient than a spanning tree with `N - 1` references.
E.g. in the above example, N = 4, and we stored 6 `ChunkClones`:
{'A': {'B:0+2@1', 'C:3+2@3'},
'B': {'A:1+2@0', 'C:0+3@2'},
'C': {'B:2+3@0', 'A:3+2@3'}}
The redundancy is obvious, e.g. each of these pairs are mirror images:
- 'A': 'B:0+2@1' versus 'B': 'A:1+2@0'
- 'A': 'C:3+2@3' versus 'C': 'A:3+2@3'
- 'B': 'C:0+3@2' versus 'C': 'B:2+3@0'
Picking one ChunkClone from each line would make a 3-edge spanning tree.
Using an inefficient presentation is an intentional design decision.
In most test filesystems, the copy number of any Chunk will be low, so
the cost of enumerating all references is minimal. The upside of this
quadratic representation is that it is unique and simple.
In contrast, presenting the clone structure via a spanning tree breaks
the symmetry, and then each test author has to understand the process
by which the N-1 spanning tree edges are selected. It's easy to make
such a process deterministic, but it still adds cognitive load.
[1] The current code tracks clones of HOLEs, because it makes no effort to
ignore them. I would guess that btrfs lacks this tracking, since such
clones would save no space. Once this is confirmed, it would be very
easy to either ignore, or leave unpopulated the `chunk_clones` field for
`Chunk` object with `kind == Extent.Kind.HOLE`.
[2] I refer to "bytes" throughout, but in actuality filesystems are
block-oriented. To deal with this, divide all lengths and offsets by
your block size to get the sense of "bytes" used here.
[3] The current code does NOT merge adjacent ChunkClones that were created
by separate `clone` operations. This is easy to fix once it comes up in
real applications. Tested in `test_cannot_merge_adjacent_clones()`.
"""
# Future: frozentypes instead of NamedTuples can permit some cleanups below.
import functools
from collections import defaultdict
from typing import Dict, Iterable, NamedTuple, Sequence, Tuple
from .extent import Extent
from .inode import Chunk, ChunkClone, Clone
from .inode_id import InodeID
class _CloneExtentRef(NamedTuple):
    """
    Connects a part of a HOLE/DATA leaf Extent to a location in an Inode.

    Although the Extent is shared between many inodes and/or disjoint
    locations in the same inode, each _CloneExtentRef object is specific to
    one occurrence of this Extent in the `gen_trimmed_leaves` of one inode.

    We initially create a _CloneExtentRef for every piece of every inode,
    but later we only retain those that have some inter-inode overlap within
    their `.extent`, thus identifying cloned chunks of inodes.

    Aside: Unlike the simplified data model in `inode.py`, the Extent's
    object identity captures the original reason that parts of some inodes
    became identified via a clone relationship.  We mostly use this for
    assertions.

    Future: With `frozentype`, __new__ could assert that `offset` and
    `clone.length` are sane with respect to `extent`.
    """

    clone: Clone  # `clone.length` trims `extent`
    extent: Extent
    offset: int  # Trims `extent`
    # The position in `gen_trimmed_leaves` of the specific trimmed leaf that
    # is being connected to another inode.
    #
    # It is possible for an Inode to have two instances of the same Extent
    # with the same offset & length in its `gen_trimmed_leaves` stream, see
    # e.g. `test_multi_extent`.  In that case, we cannot correctly assign
    # `ChunkClone`s to their trimmed leaves solely based on the content of
    # the trimmed leaf: `(offset, length, extent)`.
    #
    # You might ask why the `ChunkClone` lists would differ between
    # identical trimmed extents?  Here is why: the first has to refer to the
    # second, but not to itself, and conversely, the second must refer to
    # the first, but not to itself.
    #
    # We could avoid this denormalization by keying `CloneChunk`s on
    # `(inode_offset, offset, length, extent)`, which is unique.  And
    # `extents_to_chunks_with_clones` does already track `inode_offset`.
    # However, the denormalized approach seemed cleaner.
    leaf_idx: int

    def __repr__(self) -> str:  # pragma: no cover
        return (
            f"{self.clone.inode_id}:{self.clone.offset}"
            f"+{self.clone.length}:{id(self.extent)}"  # Extent is too noisy
        )
# If these change, we have to update `_clone_op_compare_key`: it strips the
# `inode_id` / `clone` fields with a `[1:]` slice, which is only correct
# while they remain the FIRST field of their respective NamedTuples.
assert Clone._fields.index("inode_id") == 0
assert _CloneExtentRef._fields.index("clone") == 0


# Our _CloneOp ordering obeys the following invariants:
#  - sort by position first
#  - sort by action second, putting POPs before PUSHes (see their def'ns)
# We do not need finer-grained ordering because:
#  (1) we only do work on POPs,
#  (2) the work done on all the POPs at one position does not depend on the
#      order of the _CloneOps -- we symmetrically record the relationship in
#      both directions:
#        (just-popped op, each unpopped op)
#        (each unpopped op, just-popped op)
#
# We could get the desired ordering implicitly by:
#  - relying on the order of field declaration in `_CloneOp` (not bad)
#  - making `Inode`s comparable (a bit ugly, comparing Extents is pricy,
#    comparing InodeIDs would require some comparator boilerplate)
# Luckily, being explicit is not *that* painful.
def _clone_op_compare_key(c: "_CloneOp"):
    """
    Sort key for `_CloneOp`s: position first, then action (POP < PUSH),
    then the remaining fields as deterministic tie-breakers.  The `[1:]`
    slices drop the `clone` / `inode_id` fields (asserted above to sit at
    index 0), whose comparable stand-in is appended at the end.
    """
    ref = c.ref
    clone = ref.clone
    return (c.pos, c.action, ref[1:], clone[1:], clone.inode_id.id)
def _clone_op_compare(fn):
    """
    Wrap a raw `tuple` comparator `fn` so that it compares `_CloneOp`s via
    `_clone_op_compare_key` (with sanity checks) rather than field order.
    """
    @functools.wraps(fn)
    def wrapper(self: "_CloneOp", other: "_CloneOp"):
        assert isinstance(other, _CloneOp)
        # The sweep only ever compares ops within one extent; the tests lean
        # on this to focus on single-extent examples, so enforce it.
        assert self.ref.extent is other.ref.extent
        # All ops are distinct, since `clone.offset` is `inode_offset`, which
        # is strictly increasing in each inode -- comparing an op to itself
        # would be a bug.
        assert tuple.__ne__(self, other)
        lhs_key = _clone_op_compare_key(self)
        rhs_key = _clone_op_compare_key(other)
        return fn(lhs_key, rhs_key)
    return wrapper
class _CloneOp(NamedTuple):
    """
    One endpoint of a trimmed-leaf interval in the sweep over a leaf Extent:
    a PUSH at the interval's start offset, a POP at its end offset.
    """
    PUSH = "push"
    POP = "pop"
    assert POP < PUSH  # We want to sort all POPs before any PUSHes
    pos: int  # Endpoint offset within the leaf Extent
    action: str  # Either PUSH or POP
    ref: _CloneExtentRef  # The interval (inode occurrence) being opened/closed
    # NamedTuple confuses functools.total_ordering, so define all 6 comparators
    __eq__ = _clone_op_compare(tuple.__eq__)
    __ne__ = _clone_op_compare(tuple.__ne__)
    __lt__ = _clone_op_compare(tuple.__lt__)
    __le__ = _clone_op_compare(tuple.__le__)
    __gt__ = _clone_op_compare(tuple.__gt__)
    __ge__ = _clone_op_compare(tuple.__ge__)
def _leaf_extent_id_to_clone_ops(
    ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
    """
    Prepare interval endpoints for the clone-detection sweep.

    For every trimmed leaf of every inode's extent tree, emit a PUSH op at
    the leaf's start offset and a POP op at its end offset, keyed by the
    object identity of the underlying leaf Extent.  The overlap computation
    itself happens in `_leaf_ref_to_chunk_clones_from_clone_ops`.
    """
    ops_by_leaf_extent = defaultdict(list)
    for ino_id, extent in ids_and_extents:
        file_offset = 0
        for leaf_idx, (offset, length, leaf_extent) in enumerate(
            extent.gen_trimmed_leaves()
        ):
            ref = _CloneExtentRef(
                clone=Clone(inode_id=ino_id, offset=file_offset, length=length),
                extent=leaf_extent,
                offset=offset,
                leaf_idx=leaf_idx,
            )
            ops = ops_by_leaf_extent[id(leaf_extent)]
            ops.append(_CloneOp(pos=offset, action=_CloneOp.PUSH, ref=ref))
            ops.append(_CloneOp(pos=offset + length, action=_CloneOp.POP, ref=ref))
            file_offset += length
    return ops_by_leaf_extent
def _leaf_ref_to_chunk_clones_from_clone_ops(
    extent_id: int, clone_ops: Iterable[_CloneOp]
):
    """
    Sweep over the sorted interval endpoints produced by
    `_leaf_extent_id_to_clone_ops` for ONE leaf extent, and for every pair
    of overlapping intervals record a symmetric pair of `ChunkClone`s --
    one on each inode's side of the relationship.
    """
    active_ops: Dict[_CloneExtentRef, _CloneOp] = {}  # Tracks open intervals
    leaf_ref_to_chunk_clones = defaultdict(list)
    for op in sorted(clone_ops):
        # Whenever an interval (aka an Inode's Extent's "trimmed leaf")
        # ends, we create `ChunkClone` objects **to** and **from** all the
        # concurrently open intervals.
        if op.action is _CloneOp.POP:
            pushed_op = active_ops.pop(op.ref)
            # Sanity: the POP matches its PUSH and belongs to this extent.
            assert pushed_op.ref is op.ref
            assert id(op.ref.extent) == extent_id
            assert pushed_op.pos == op.ref.offset
            assert pushed_op.pos + op.ref.clone.length == op.pos
            for clone_op in active_ops.values():
                assert op.ref.extent is clone_op.ref.extent
                # The cloned portion's extent offset is the larger of the 2
                bigger_offset = max(clone_op.ref.offset, op.ref.offset)
                # Record that `clone_op` clones part of `op`'s inode.
                leaf_ref_to_chunk_clones[op.ref].append(
                    ChunkClone(
                        offset=bigger_offset,
                        clone=Clone(
                            inode_id=clone_op.ref.clone.inode_id,
                            offset=clone_op.ref.clone.offset
                            + (bigger_offset - clone_op.ref.offset),
                            length=op.pos - bigger_offset,
                        ),
                    )
                )
                # Record that `op` clones part of `clone_op`'s inode.
                leaf_ref_to_chunk_clones[clone_op.ref].append(
                    ChunkClone(
                        offset=bigger_offset,
                        clone=Clone(
                            inode_id=op.ref.clone.inode_id,
                            offset=op.ref.clone.offset
                            + (bigger_offset - op.ref.offset),
                            length=op.pos - bigger_offset,  # Same length
                        ),
                    )
                )
        # Sorting guarantees all POPs for `pos` are handled before PUSHes
        elif op.action == _CloneOp.PUSH:
            assert op.ref not in active_ops
            active_ops[op.ref] = op
        else:
            raise AssertionError(op)  # pragma: no cover
    return leaf_ref_to_chunk_clones
def _id_to_leaf_idx_to_chunk_clones(
    ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
    """Group newly created ChunkClones by InodeID, then by trimmed-leaf index."""
    result = defaultdict(dict)
    clone_ops_by_extent = _leaf_extent_id_to_clone_ops(ids_and_extents)
    for extent_id, clone_ops in clone_ops_by_extent.items():
        chunk_clones_by_ref = _leaf_ref_to_chunk_clones_from_clone_ops(
            extent_id, clone_ops
        )
        for leaf_ref, offsets_clones in chunk_clones_by_ref.items():
            per_leaf = result[leaf_ref.clone.inode_id]
            # Each (inode ID, leaf_idx) pair names exactly one extent, and
            # every extent is handled in exactly one outer iteration -- so
            # no two iterations can write the same key.
            assert leaf_ref.leaf_idx not in per_leaf
            # `leaf_idx` is the chunk's position in `gen_trimmed_leaves`,
            # which tells `extents_to_chunks_with_clones` where these
            # clones belong.
            per_leaf[leaf_ref.leaf_idx] = offsets_clones
    return result
def extents_to_chunks_with_clones(
    ids_and_extents: Sequence[Tuple[InodeID, Extent]]
) -> Iterable[Tuple[InodeID, Sequence[Chunk]]]:
    """
    Converts the nested, history-preserving `Extent` structures into flat
    sequences of `Chunk`s, while being careful to annotate cloned parts as
    described in this file's docblock.  The `InodeID`s are needed to ensure
    that the `Chunk`s' `Clone` objects refer to the appropriate files.
    """
    id_to_leaf_idx_to_chunk_clones = _id_to_leaf_idx_to_chunk_clones(
        ids_and_extents
    )
    for ino_id, extent in ids_and_extents:
        leaf_to_chunk_clones = id_to_leaf_idx_to_chunk_clones.get(ino_id, {})
        new_chunks = []
        # NOTE: the loop variable `extent` below shadows the outer `extent`;
        # the outer value is not used again within this iteration.
        for leaf_idx, (offset, length, extent) in enumerate(
            extent.gen_trimmed_leaves()
        ):
            chunk_clones = leaf_to_chunk_clones.get(leaf_idx, [])
            assert isinstance(extent.content, Extent.Kind)
            # If the chunk kind matches, merge into the previous chunk.
            if new_chunks and new_chunks[-1].kind == extent.content:
                prev_length = new_chunks[-1].length
                prev_clones = new_chunks[-1].chunk_clones
            else:  # Otherwise, make a new one.
                prev_length = 0
                prev_clones = set()
                new_chunks.append(None)
            # `prev_clones` is the SAME set object stored in the replacement
            # Chunk, so the `.update(...)` below extends the merged chunk's
            # clone set in place.
            new_chunks[-1] = Chunk(
                kind=extent.content,
                length=length + prev_length,
                chunk_clones=prev_clones,
            )
            new_chunks[-1].chunk_clones.update(
                # Future: when switching to frozentype, __new__ should
                # validate that clone offset & length are sane relative
                # to the trimmed extent.
                ChunkClone(
                    clone=clone,
                    # Subtract `offset` because `ChunkClone.offset` is
                    # Extent-relative, but in the actual file layout, the
                    # leaf Extent is trimmed further.
                    offset=clone_offset + prev_length - offset,
                )
                for clone_offset, clone in chunk_clones
            )
        # Future: `deepfrozen` was made for this:
        yield ino_id, tuple(
            Chunk(
                kind=c.kind,
                length=c.length,
                chunk_clones=frozenset(c.chunk_clones),
            )
            for c in new_chunks
        )
| antlir/btrfs_diff/extents_to_chunks.py | 18,147 | Connects a part of a HOLE/DATA leaf Extent to a location in an Inode.
Although the Extent is shared between many inodes and/or disjoint
locations in the same inode, each _CloneExtentRef object is specific to
one occurrence of this Extent in the `gen_trimmed_leaves` of one inode.
We initially create a _CloneExtentRef for every piece of every inode,
but later we only retain those have some inter-inode overlap within
their `.extent`, thus identifying cloned chunks of inodes.
Aside: Unlike the simplified data model in `inode.py`, the Extent's
object identity captures the original reason that parts of some inodes
became identified via a clone relationship. We mostly use this for
assertions.
Future: With `frozentype`, __new__ could assert that `offset` and
`clone.length` are sane with respect to `extent`.
Aggregates newly created ChunkClones per InodeID, and per "trimmed leaf"
To collect the parts of a Chunk that are cloned, we will run a variation
on the standard interval-overlap algorithm. We first sort the starts &
ends of each interval, and then do a sequential scan that uses starts to
add, and ends to remove, a tracking object from a "current intervals"
structure.
This function simply prepares the set of interval starts & ends for each
InodeID, the computation is in `_leaf_ref_to_chunk_clones_from_clone_ops`.
As per `_leaf_extent_id_to_clone_ops`, this computes interval overlaps
Converts the nested, history-preserving `Extent` structures into flat
sequences of `Chunk`s, while being careful to annotate cloned parts as
described in this file's docblock. The `InodeID`s are needed to ensure
that the `Chunk`s' `Clone` objects refer to the appropriate files.
One of the trickier parts of creating a mock btrfs filesystem is tracking
the structures of the write forks, respecting `truncate`, `write`, and
`clone` operations. We achieve this as follows:
- Sequentially apply `btrfs send` operations to create & update:
* `IncompleteInode`s and their `Extent`s,
* the path -> `IncompleteInode` mapping.
- Run `extents_to_chunks_with_clones()` to summarize which files clone
which other files. A quick clarificaiton of the notation:
* `Extent` is actually a tree of extents, which captures the history of
how the file's sequence of extents was created. Refer to `extent.py`.
* `Chunk` more directly corresponds to a filesystem extent. It's either
data or a hole of a given length. A file is just a contiguous sequence
of `Chunk`s. Beyond recording the kind, and the length, each `Chunk`
records precisely how other files clone from it.
So `extents_to_chunks_with_clones()` flattens the history-preserving,
clone-aware tree in `Extent` objects into a test-friendly list of
`Chunk`s.
For testing, it is important to produce a representation that is as
normalized as possible: our output should deterministically and uniquely
capture the information we wish to test, and omit everything else[1].
We do NOT want our output to depend on the order of the operations that
created the filesystem, but only on the final filesystem state.
Specifically:
* For any byte offset[2] in the file, we need to know whether it's a
`HOLE`, or it contains `DATA` (see `Extent.Kind`). An offset -> kind
map is too verbose to use in manual tests, so we merge adjacent
offsets with the same `Extent.Kind` into `Chunk`s.
* For any offset in the file, we need to know whether it is a clone of
any other file locations (i.e. copy-on-write sharing of underlying
storage). For this reason, each `Chunk` has a set of `ChunkClones`,
which form a normalized[3] description of the shared-storage links on
the filesystem.
To give an example -- let's say that columns are byte offsets, and we
have this 10-byte extent, parts of which were cloned to make files
`A`, `B`, and `C`:
0123456789 # offsets on disk
BBBBBAAA # some part of file `B` includes offsets 1-5; `A` -- 6-8
AAACCCCC # `A` ALSO includes 0-2, possibly separated from its 6-8
(Aside: `test_extents_to_chunks_with_clones` also uses such figures)
Reading this figure, we see that:
- A has a 6-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 1 into B at `offset` 0 with length 2, aka `B:0+2@1`
* From `offset` 3 into C at `offset` 3 with length 2, aka `C:3+2@3'
- B has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into A at `offset` 1 with length 2, aka `A:1+2@0`
* From `offset` 2 into C at `offset` 0 with length 3, aka `C:0+3@2'
- C has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into B at `offset` 2 with length 3, aka `B:2+3@0`
* From `offset` 3 into A at `offset` 3 with length 2, aka `A:3+2@3'
You can see that our representation of "a set of `ChunkClone`s for
every `Chunk`" is NOT parsimonious. If the same range of bytes is
cloned into N `Chunk`s, each of those `Chunk`s will refer to every
other `Chunk`, for a total of N*(N-1)/2 references. This is far less
efficient than a spanning tree with `N - 1` references.
E.g. in the above example, N = 4, and we stored 6 `ChunkClones`:
{'A': {'B:0+2@1', 'C:3+2@3'},
'B': {'A:1+2@0', 'C:0+3@2'},
'C': {'B:2+3@0', 'A:3+2@3'}}
The redundancy is obvious, e.g. each of these pairs are mirror images:
- 'A': 'B:0+2@1' versus 'B': 'A:1+2@0'
- 'A': 'C:3+2@3' versus 'C': 'A:3+2@3'
- 'B': 'C:0+3@2' versus 'C': 'B:2+3@0'
Picking one ChunkClone from each line would make a 3-edge spanning tree.
Using an inefficient presentation is an intentional design decision.
In most test filesystems, the copy number of any Chunk will be low, so
the cost of enumerating all references is minimal. The upside of this
quadratic representation is that it is unique and simple.
In contrast, presenting the clone structure via a spanning tree breaks
the symmetry, and then each test author has to understand the process
by which the N-1 spanning tree edges are selected. It's easy to make
such a process deterministic, but it still adds cognitive load.
[1] The current code tracks clones of HOLEs, because it makes no effort to
ignore them. I would guess that btrfs lacks this tracking, since such
clones would save no space. Once this is confirmed, it would be very
easy to either ignore, or leave unpopulated the `chunk_clones` field for
`Chunk` object with `kind == Extent.Kind.HOLE`.
[2] I refer to "bytes" throughout, but in actuality filesystems are
block-oriented. To deal with this, divide all lengths and offsets by
your block size to get the sense of "bytes" used here.
[3] The current code does NOT merge adjacent ChunkClones that were created
by separate `clone` operations. This is easy to fix once it comes up in
real applications. Tested in `test_cannot_merge_adjacent_clones()`.
!/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. Future: frozentypes instead of NamedTuples can permit some cleanups below. `clone.length` trims `extent` Trims `extent` The position in `gen_trimmed_leaves` of the specific trimmed leaf that is being connected to another inode. It is possible for a Inode to have two instances of the same Extent with the same offset & length in its `gen_trimmed_leaves` stream, see e.g. `test_multi_extent`. In that case, we cannot correctly assign `ChunkClone`s to their trimmed leaves solely based on the content of the trimmed leaf: `(offset, length, extent)`. You might ask why the `ChunkClone` lists would differ between identical trimmed extents? Here is why: the first has to refer to the second, but not to itself, and conversely, the second must refer to the first, but not to itself. We could avoid this denormalization by keying `CloneChunk`s on `(inode_offset, offset, length, extent)`, which is unique. And `extents_to_chunks_with_clones` does already track `inode_offset`. However, the denormalized approach seemed cleaner. 
pragma: no cover Extent is too noisy If these change, we have to update `_clone_op_compare_key` Our _CloneOp ordering obeys the following invariants: - sort by position first - sort by action second, putting POPs before PUSHes (see their def'ns) We do not need finer-grained ordering because: (1) we only do work on POPs, (2) the work done on all the POPs at one position does not depend on the order of the _CloneOps -- we symmetrically record the relationship in both directions: (just-popped op, each unpopped op) (each unpopped op, just-popped op) We could get the desired ordering implicitly by: - relying on the order of field declaration in `_CloneOp` (not bad) - making `Inode`s comparable (a bit ugly, comparing Extents is pricy, comparing InodeIDs would require some comparator boilerplate) Luckily, being explicit is not *that* painful. The preceding asserts make these [1:] hacks tolerable. We only compare ops within one extent. The tests assume this to justify focusing on single-extent examples, so check it. All our items are distinct, since `clone.offset` is `inode_offset`, which is strictly increasing in each inode. We have no business comparing a _CloneOp with itself. We want to sort all POPs before any PUSHes NamedTuple confuses functools.total_ordering, so define all 6 comparators Tracks open intervals Whenever an interval (aka an Inode's Extent's "trimmed leaf") ends, we create `ChunkClone` objects **to** and **from** all the concurrently open intervals. The cloned portion's extent offset is the larger of the 2 Record that `clone_op` clones part of `op`'s inode. Record that `op` clones part of `clone_op`'s inode. Same length Sorting guarantees all POPs for `pos` are handled before PUSHes pragma: no cover A `leaf_idx` from a specific inode ID refers to one extent, and each extent is handled in one iteration, so it cannot be that two iterations contribute to the same `leaf_idx` key. 
`leaf_idx` is the position in `gen_trimmed_leaves` of the chunk, whose clones we computed. That fully specifies where `extents_to_chunks_with_clones` should put the clones. If the chunk kind matches, merge into the previous chunk. Otherwise, make a new one. Future: when switching to frozentype, __new__ should validate that clone offset & length are sane relative to the trimmed extent. Subtract `offset` because `ChunkClone.offset` is Extent-relative, but in the actual file layout, the leaf Extent is trimmed further. Future: `deepfrozen` was made for this: | 10,867 | en | 0.894589 |
from sympy.vector.coordsysrect import CoordSysCartesian
from sympy.vector.scalar import BaseScalar
from sympy import sin, cos, pi, ImmutableMatrix as Matrix, \
symbols, simplify, zeros, expand
from sympy.vector.functions import express
from sympy.vector.point import Point
from sympy.vector.vector import Vector
from sympy.vector.orienters import (AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
# Shared symbolic parameters for all tests below: generic scalars/angles
# (a, b, c, q) and the four parameters (q1..q4) used for chained axis
# rotations and quaternion orientation.
a, b, c, q = symbols('a b c q')
q1, q2, q3, q4 = symbols('q1 q2 q3 q4')
def test_func_args():
    """Vector-module objects must rebuild identically via obj.func(*obj.args)."""
    frame = CoordSysCartesian('A')
    scalar_combo = 3*frame.x + 4*frame.y
    vector_combo = frame.x*frame.i + frame.y*frame.j + frame.z*frame.k
    # func/args round-tripping is the standard sympy rebuild invariant.
    for obj in (frame.x, scalar_combo, frame.i, vector_combo, frame.origin):
        assert obj.func(*obj.args) == obj
def test_coordsyscartesian_equivalence():
    """Coordinate systems compare equal exactly when built with the same name."""
    first = CoordSysCartesian('A')
    same_name = CoordSysCartesian('A')
    assert same_name == first
    other_name = CoordSysCartesian('B')
    assert first != other_name
def test_orienters():
    """
    Checks the rotation matrices produced by each orienter type (axis,
    body '123', space '123' and quaternion) against hand-written
    reference matrices.
    """
    A = CoordSysCartesian('A')
    axis_orienter = AxisOrienter(a, A.k)
    body_orienter = BodyOrienter(a, b, c, '123')
    space_orienter = SpaceOrienter(a, b, c, '123')
    q_orienter = QuaternionOrienter(q1, q2, q3, q4)
    # Rotation about A.k by angle a: a plane rotation in the x-y plane.
    assert axis_orienter.rotation_matrix(A) == Matrix([
        [cos(a), sin(a), 0],
        [-sin(a), cos(a), 0],
        [0, 0, 1]])
    assert body_orienter.rotation_matrix() == Matrix([
        [cos(b)*cos(c), sin(a)*sin(b)*cos(c) + sin(c)*cos(a),
         sin(a)*sin(c) - sin(b)*cos(a)*cos(c)],
        [-sin(c)*cos(b), -sin(a)*sin(b)*sin(c) + cos(a)*cos(c),
         sin(a)*cos(c) + sin(b)*sin(c)*cos(a)],
        [sin(b), -sin(a)*cos(b),
         cos(a)*cos(b)]])
    assert space_orienter.rotation_matrix() == Matrix([
        [cos(b)*cos(c), sin(c)*cos(b), -sin(b)],
        [sin(a)*sin(b)*cos(c) - sin(c)*cos(a),
         sin(a)*sin(b)*sin(c) + cos(a)*cos(c), sin(a)*cos(b)],
        [sin(a)*sin(c) + sin(b)*cos(a)*cos(c), -sin(a)*cos(c) +
         sin(b)*sin(c)*cos(a), cos(a)*cos(b)]])
    # Quaternion rotation matrix written out element by element.
    assert q_orienter.rotation_matrix() == Matrix([
        [q1**2 + q2**2 - q3**2 - q4**2, 2*q1*q4 + 2*q2*q3,
         -2*q1*q3 + 2*q2*q4],
        [-2*q1*q4 + 2*q2*q3, q1**2 - q2**2 + q3**2 - q4**2,
         2*q1*q2 + 2*q3*q4],
        [2*q1*q3 + 2*q2*q4,
         -2*q1*q2 + 2*q3*q4, q1**2 - q2**2 - q3**2 + q4**2]])
def test_coordinate_vars():
    """
    Tests the coordinate variables functionality with respect to
    reorientation of coordinate systems.
    """
    A = CoordSysCartesian('A')
    # Note that the name given on the lhs is different from A.x._name
    assert BaseScalar('A.x', 0, A, 'A_x', r'\mathbf{{x}_{A}}') == A.x
    assert BaseScalar('A.y', 1, A, 'A_y', r'\mathbf{{y}_{A}}') == A.y
    assert BaseScalar('A.z', 2, A, 'A_z', r'\mathbf{{z}_{A}}') == A.z
    assert BaseScalar('A.x', 0, A, 'A_x', r'\mathbf{{x}_{A}}').__hash__() == A.x.__hash__()
    assert isinstance(A.x, BaseScalar) and \
        isinstance(A.y, BaseScalar) and \
        isinstance(A.z, BaseScalar)
    assert A.x*A.y == A.y*A.x
    assert A.scalar_map(A) == {A.x: A.x, A.y: A.y, A.z: A.z}
    assert A.x.system == A
    assert A.x.diff(A.x) == 1
    # Rotating about A.k mixes only the x/y coordinates; z passes through.
    B = A.orient_new_axis('B', q, A.k)
    assert B.scalar_map(A) == {B.z: A.z, B.y: -A.x*sin(q) + A.y*cos(q),
                               B.x: A.x*cos(q) + A.y*sin(q)}
    assert A.scalar_map(B) == {A.x: B.x*cos(q) - B.y*sin(q),
                               A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
    assert express(B.x, A, variables=True) == A.x*cos(q) + A.y*sin(q)
    assert express(B.y, A, variables=True) == -A.x*sin(q) + A.y*cos(q)
    assert express(B.z, A, variables=True) == A.z
    assert expand(express(B.x*B.y*B.z, A, variables=True)) == \
        expand(A.z*(-A.x*sin(q) + A.y*cos(q))*(A.x*cos(q) + A.y*sin(q)))
    assert express(B.x*B.i + B.y*B.j + B.z*B.k, A) == \
        (B.x*cos(q) - B.y*sin(q))*A.i + (B.x*sin(q) +
        B.y*cos(q))*A.j + B.z*A.k
    assert simplify(express(B.x*B.i + B.y*B.j + B.z*B.k, A,
                            variables=True)) == \
        A.x*A.i + A.y*A.j + A.z*A.k
    assert express(A.x*A.i + A.y*A.j + A.z*A.k, B) == \
        (A.x*cos(q) + A.y*sin(q))*B.i + \
        (-A.x*sin(q) + A.y*cos(q))*B.j + A.z*B.k
    assert simplify(express(A.x*A.i + A.y*A.j + A.z*A.k, B,
                            variables=True)) == \
        B.x*B.i + B.y*B.j + B.z*B.k
    # Rotating back by -q about the same axis must cancel exactly.
    N = B.orient_new_axis('N', -q, B.k)
    assert N.scalar_map(A) == \
        {N.x: A.x, N.z: A.z, N.y: A.y}
    # Rotation about the (1, 1, 1) diagonal axis.
    C = A.orient_new_axis('C', q, A.i + A.j + A.k)
    mapping = A.scalar_map(C)
    assert mapping[A.x] == (C.x*(2*cos(q) + 1)/3 +
                            C.y*(-2*sin(q + pi/6) + 1)/3 +
                            C.z*(-2*cos(q + pi/3) + 1)/3)
    assert mapping[A.y] == (C.x*(-2*cos(q + pi/3) + 1)/3 +
                            C.y*(2*cos(q) + 1)/3 +
                            C.z*(-2*sin(q + pi/6) + 1)/3)
    assert mapping[A.z] == (C.x*(-2*sin(q + pi/6) + 1)/3 +
                            C.y*(-2*cos(q + pi/3) + 1)/3 +
                            C.z*(2*cos(q) + 1)/3)
    # Pure translation only offsets the scalars.
    D = A.locate_new('D', a*A.i + b*A.j + c*A.k)
    assert D.scalar_map(A) == {D.z: A.z - c, D.x: A.x - a, D.y: A.y - b}
    # Combined rotation plus translation.
    E = A.orient_new_axis('E', a, A.k, a*A.i + b*A.j + c*A.k)
    assert A.scalar_map(E) == {A.z: E.z + c,
                               A.x: E.x*cos(a) - E.y*sin(a) + a,
                               A.y: E.x*sin(a) + E.y*cos(a) + b}
    assert E.scalar_map(A) == {E.x: (A.x - a)*cos(a) + (A.y - b)*sin(a),
                               E.y: (-A.x + a)*sin(a) + (A.y - b)*cos(a),
                               E.z: A.z - c}
    # Zero translation must be the identity mapping.
    F = A.locate_new('F', Vector.zero)
    assert A.scalar_map(F) == {A.z: F.z, A.x: F.x, A.y: F.y}
def test_rotation_matrix():
    """
    Tests rotation_matrix() between systems related by chained axis
    rotations as well as space, quaternion and body orientations.
    """
    N = CoordSysCartesian('N')
    A = N.orient_new_axis('A', q1, N.k)
    B = A.orient_new_axis('B', q2, A.i)
    C = B.orient_new_axis('C', q3, B.j)
    D = N.orient_new_axis('D', q4, N.j)
    E = N.orient_new_space('E', q1, q2, q3, '123')
    F = N.orient_new_quaternion('F', q1, q2, q3, q4)
    G = N.orient_new_body('G', q1, q2, q3, '123')
    # Composition of the three chained axis rotations N -> A -> B -> C.
    assert N.rotation_matrix(C) == Matrix([
        [- sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), - sin(q1) *
         cos(q2), sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
        [sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1),
         cos(q1) * cos(q2), sin(q1) * sin(q3) - sin(q2) * cos(q1) *
         cos(q3)], [- sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
    # The difference to the reference must expand to exactly zero.
    test_mat = D.rotation_matrix(C) - Matrix(
        [[cos(q1) * cos(q3) * cos(q4) - sin(q3) * (- sin(q4) * cos(q2) +
          sin(q1) * sin(q2) * cos(q4)), - sin(q2) * sin(q4) - sin(q1) *
          cos(q2) * cos(q4), sin(q3) * cos(q1) * cos(q4) + cos(q3) *
          (- sin(q4) * cos(q2) + sin(q1) * sin(q2) * cos(q4))],
         [sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) *
          cos(q2), sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
         [sin(q4) * cos(q1) * cos(q3) - sin(q3) * (cos(q2) * cos(q4) +
          sin(q1) * sin(q2) *
          sin(q4)), sin(q2) *
          cos(q4) - sin(q1) * sin(q4) * cos(q2), sin(q3) *
          sin(q4) * cos(q1) + cos(q3) * (cos(q2) * cos(q4) +
          sin(q1) * sin(q2) * sin(q4))]])
    assert test_mat.expand() == zeros(3, 3)
    assert E.rotation_matrix(N) == Matrix(
        [[cos(q2)*cos(q3), sin(q3)*cos(q2), -sin(q2)],
         [sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1),
          sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2)],
         [sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -
          sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2)]])
    assert F.rotation_matrix(N) == Matrix([[
        q1**2 + q2**2 - q3**2 - q4**2,
        2*q1*q4 + 2*q2*q3, -2*q1*q3 + 2*q2*q4], [-2*q1*q4 + 2*q2*q3,
        q1**2 - q2**2 + q3**2 - q4**2, 2*q1*q2 + 2*q3*q4],
        [2*q1*q3 + 2*q2*q4,
         -2*q1*q2 + 2*q3*q4,
         q1**2 - q2**2 - q3**2 + q4**2]])
    assert G.rotation_matrix(N) == Matrix([[
        cos(q2)*cos(q3), sin(q1)*sin(q2)*cos(q3) + sin(q3)*cos(q1),
        sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3)], [
        -sin(q3)*cos(q2), -sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3),
        sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)], [
        sin(q2), -sin(q1)*cos(q2), cos(q1)*cos(q2)]])
def test_vector():
    """
    Tests the effects of orientation of coordinate systems on
    basic vector operations.
    """
    N = CoordSysCartesian('N')
    A = N.orient_new_axis('A', q1, N.k)
    B = A.orient_new_axis('B', q2, A.i)
    C = B.orient_new_axis('C', q3, B.j)
    # Test to_matrix
    v1 = a*N.i + b*N.j + c*N.k
    assert v1.to_matrix(A) == Matrix([[a*cos(q1) + b*sin(q1)],
                                      [-a*sin(q1) + b*cos(q1)],
                                      [c]])
    # Test dot
    assert N.i.dot(A.i) == cos(q1)
    assert N.i.dot(A.j) == -sin(q1)
    assert N.i.dot(A.k) == 0
    assert N.j.dot(A.i) == sin(q1)
    assert N.j.dot(A.j) == cos(q1)
    assert N.j.dot(A.k) == 0
    assert N.k.dot(A.i) == 0
    assert N.k.dot(A.j) == 0
    assert N.k.dot(A.k) == 1
    # Dot product is symmetric and distributes over addition.
    assert N.i.dot(A.i + A.j) == -sin(q1) + cos(q1) == \
        (A.i + A.j).dot(N.i)
    assert A.i.dot(C.i) == cos(q3)
    assert A.i.dot(C.j) == 0
    assert A.i.dot(C.k) == sin(q3)
    assert A.j.dot(C.i) == sin(q2)*sin(q3)
    assert A.j.dot(C.j) == cos(q2)
    assert A.j.dot(C.k) == -sin(q2)*cos(q3)
    assert A.k.dot(C.i) == -cos(q2)*sin(q3)
    assert A.k.dot(C.j) == sin(q2)
    assert A.k.dot(C.k) == cos(q2)*cos(q3)
    # Test cross
    assert N.i.cross(A.i) == sin(q1)*A.k
    assert N.i.cross(A.j) == cos(q1)*A.k
    assert N.i.cross(A.k) == -sin(q1)*A.i - cos(q1)*A.j
    assert N.j.cross(A.i) == -cos(q1)*A.k
    assert N.j.cross(A.j) == sin(q1)*A.k
    assert N.j.cross(A.k) == cos(q1)*A.i - sin(q1)*A.j
    assert N.k.cross(A.i) == A.j
    assert N.k.cross(A.j) == -A.i
    assert N.k.cross(A.k) == Vector.zero
    assert N.i.cross(A.i) == sin(q1)*A.k
    assert N.i.cross(A.j) == cos(q1)*A.k
    assert N.i.cross(A.i + A.j) == sin(q1)*A.k + cos(q1)*A.k
    assert (A.i + A.j).cross(N.i) == (-sin(q1) - cos(q1))*N.k
    assert A.i.cross(C.i) == sin(q3)*C.j
    assert A.i.cross(C.j) == -sin(q3)*C.i + cos(q3)*C.k
    assert A.i.cross(C.k) == -cos(q3)*C.j
    assert C.i.cross(A.i) == (-sin(q3)*cos(q2))*A.j + \
        (-sin(q2)*sin(q3))*A.k
    assert C.j.cross(A.i) == (sin(q2))*A.j + (-cos(q2))*A.k
    assert express(C.k.cross(A.i), C).trigsimp() == cos(q3)*C.j
def test_orient_new_methods():
    """orient_new with a single orienter must match the dedicated helper."""
    base = CoordSysCartesian('N')
    by_axis = AxisOrienter(q4, base.j)
    by_space = SpaceOrienter(q1, q2, q3, '123')
    by_quat = QuaternionOrienter(q1, q2, q3, q4)
    by_body = BodyOrienter(q1, q2, q3, '123')
    # Each generic orient_new call must agree with its specialised variant.
    D = base.orient_new('D', (by_axis, ))
    E = base.orient_new('E', (by_space, ))
    F = base.orient_new('F', (by_quat, ))
    G = base.orient_new('G', (by_body, ))
    assert D == base.orient_new_axis('D', q4, base.j)
    assert E == base.orient_new_space('E', q1, q2, q3, '123')
    assert F == base.orient_new_quaternion('F', q1, q2, q3, q4)
    assert G == base.orient_new_body('G', q1, q2, q3, '123')
def test_locatenew_point():
    """
    Tests Point class, and locate_new method in CoordSysCartesian.
    """
    base = CoordSysCartesian('A')
    assert isinstance(base.origin, Point)
    offset = a*base.i + b*base.j + c*base.k
    shifted = base.locate_new('C', offset)
    # Positions of the translated system relative to the base system.
    assert shifted.origin.position_wrt(base) == \
        shifted.position_wrt(base) == \
        shifted.origin.position_wrt(base.origin) == offset
    assert base.origin.position_wrt(shifted) == \
        base.position_wrt(shifted) == \
        base.origin.position_wrt(shifted.origin) == -offset
    assert base.origin.express_coordinates(shifted) == (-a, -b, -c)
    # A point mirrored through the base origin.
    mirror = base.origin.locate_new('p', -offset)
    assert mirror.express_coordinates(base) == (-a, -b, -c)
    assert mirror.position_wrt(shifted.origin) == \
        mirror.position_wrt(shifted) == -2 * offset
    # Moving back by 2*offset lands exactly on shifted's origin.
    back = mirror.locate_new('p1', 2*offset)
    assert back.position_wrt(shifted.origin) == Vector.zero
    assert back.express_coordinates(shifted) == (0, 0, 0)
    nudged = mirror.locate_new('p2', base.i)
    assert back.position_wrt(nudged) == 2*offset - base.i
    assert nudged.express_coordinates(shifted) == (-2*a + 1, -2*b, -2*c)
def test_evalf():
    """n() is an alias of evalf(), and substitution commutes with evalf()."""
    frame = CoordSysCartesian('A')
    vec = 3*frame.i + 4*frame.j + a*frame.k
    assert vec.n() == vec.evalf()
    # Substituting before or via the subs= keyword must agree.
    assert vec.evalf(subs={a: 1}) == vec.subs(a, 1).evalf()
| sympy/vector/tests/test_coordsysrect.py | 12,706 | Tests the coordinate variables functionality with respect to
reorientation of coordinate systems.
Tests Point class, and locate_new method in CoordSysCartesian.
Tests the effects of orientation of coordinate systems on
basic vector operations.
Note that the name given on the lhs is different from A.x._nameTest to_matrixTest dotTest cross | 341 | en | 0.793412 |
# Webhooks for external integrations.
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.models import get_client
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
import ujson
from typing import Any, Dict
@api_key_only_webhook_view('Semaphore')
@has_request_variables
def api_semaphore_webhook(request, user_profile,
                          payload=REQ(argument_type='body'),
                          stream=REQ(default='builds')):
    # type: (HttpRequest, UserProfile, Dict[str, Any], str) -> HttpResponse
    """Handle a Semaphore CI webhook and post a build/deploy summary.

    Semaphore only reports the most recent commit, even when several
    commits were pushed since the last build.
    """
    event = payload["event"]
    result = payload["result"]
    commit = payload["commit"]
    if event == "build":
        first_line = u"[build %s](%s): %s\n" % (
            payload["build_number"], payload["build_url"], result)
    elif event == "deploy":
        first_line = u"[deploy %s](%s) of [build %s](%s) on server %s: %s\n" % (
            payload["number"], payload["html_url"],
            payload["build_number"], payload["build_html_url"],
            payload["server_name"], result)
    else:
        # Unknown event type; fall back to a generic one-line summary.
        first_line = u"%s: %s\n" % (event, result)
    content = first_line + "!avatar(%s) [`%s`](%s): %s" % (
        commit["author_email"], commit["id"][:7],
        commit["url"], commit["message"])
    subject = u"%s/%s" % (payload["project_name"], payload["branch_name"])
    check_send_stream_message(user_profile, request.client, stream, subject, content)
    return json_success()
| zerver/webhooks/semaphore/view.py | 2,201 | Webhooks for external integrations. type: (HttpRequest, UserProfile, Dict[str, Any], str) -> HttpResponse semaphore only gives the last commit, even if there were multiple commits since the last build should never get here | 222 | en | 0.874717 |
#!/usr/bin/env python
#$Id: rotate_molecule.py,v 1.2.10.1 2016/02/11 09:24:08 annao Exp $
import os
from MolKit import Read
from MolKit.pdbWriter import PdbWriter, PdbqsWriter, PdbqWriter, PdbqtWriter
from mglutil.math.rotax import rotax
import numpy
if __name__ == '__main__':
    import sys
    import getopt

    def usage():
        "Print helpful, accurate usage statement to stdout."
        print("Usage: rotate_molecule.py -f filename")
        print()
        print(" Description of command...")
        print(" [-f] filename")
        print(" Optional parameters:")
        print(" [-o] alternative output filename")
        print(" (default is 'rotated_' +filename)")
        print(" [-y] rotate around the y axis")
        print(" (default is rotation around the z axis)")
        print(" [-x] rotate around the x axis")
        print(" (default is rotation around the z axis)")
        print(" [-u] user-defined axis of rotation '1.0,2.0,-6.2'")
        print(" (default is rotation around the z axis)")
        print(" [-a] angle for rotation about axis ")
        print(" (default is rotation around the z axis)")
        print(" [-v] verbose output")

    # process command arguments
    try:
        opt_list, args = getopt.getopt(sys.argv[1:], 'f:o:xyu:a:v')
    except getopt.GetoptError as msg:
        print('rotate_molecule.py: %s' % msg)
        usage()
        sys.exit(2)

    # initialize required parameters
    # -f: pdb_filename_stem
    filename = None
    # optional parameters
    verbose = None
    outputfilename = None
    rotation = 'z'
    # arbitrary axis angle for rotation
    axis = None
    angle = None

    # 'f:o:v'
    # NOTE(review): -f must come before -v on the command line for these
    # verbose messages to print, since verbose is set while looping.
    for o, a in opt_list:
        print("o=", o, " a=", a)
        if o in ('-f', '--f'):
            filename = a
            if verbose: print('set filename to ', filename)
            # default output name; may be overridden by -o below
            outputfilename = 'rotated_' + filename
        if o in ('-o', '--o'):
            outputfilename = a
            if verbose:
                print('set output outputfilename to ', a)
        if o in ('-x', '--x'):
            rotation = 'x'
            if verbose: print('set rotation to ', rotation)
        if o in ('-y', '--y'):
            rotation = 'y'
            if verbose: print('set rotation to ', rotation)
        if o in ('-u', '--u'):
            axis = a
            if verbose: print('set user-defined axis to ', axis)
        if o in ('-a', '--a'):
            angle = a
            if verbose: print('set angle for rotation to ', angle)
        if o in ('-v', '--v'):
            verbose = True
            if verbose: print('set verbose to ', True)
        if o in ('-h', '--'):
            usage()
            sys.exit()

    if not filename:
        print('rotate_molecule: filename must be specified.')
        usage()
        sys.exit()

    # Read the first molecule from the input file.
    mol = Read(filename)[0]
    if verbose: print('read ', filename)
    # Pick the writer matching the input extension so the output keeps
    # the same format.
    filetype = os.path.splitext(os.path.basename(filename))[1]
    if verbose: print("filetype=", filetype)
    writer = None
    if filetype == '.pdbqt':
        writer = PdbqtWriter()
    elif filetype == '.pdbq':
        writer = PdbqWriter()
    elif filetype == '.pdbqs':
        writer = PdbqsWriter()
    elif filetype == '.pdb':
        writer = PdbWriter()
    else:
        print('Sorry! Unable to write this filetype->', filetype)

    # Translate the molecule so its geometric center sits at the origin.
    # NOTE(review): this first center computation is immediately recomputed
    # from `crds` two lines below; the first line appears redundant.
    center = numpy.add.reduce(mol.allAtoms.coords)/len(mol.allAtoms)
    crds = numpy.array(mol.allAtoms.coords)
    center = numpy.add.reduce(crds)/len(mol.allAtoms)
    crds = crds - center
    crds = crds.tolist()
    mol.allAtoms.updateCoords(crds)
    lenCoords = len(crds)
    # rotate the atoms here
    if axis is not None and angle is not None:
        # Rotate by `angle` degrees about the user-supplied axis through
        # the origin.
        # NOTE(review): uses the approximation 3.14159 instead of numpy.pi
        # (available here) for the degrees->radians conversion.
        rot = (float(angle) * 3.14159/180.) % (2 * numpy.pi)
        x = numpy.array([0., 0., 0.])
        y = numpy.array(list(map(float, axis.split(','))))
        matrix = rotax(x, y, rot)
        # Append a homogeneous coordinate so the 4x4 transform applies.
        _ones = numpy.ones(lenCoords, 'f')
        _ones.shape = (lenCoords, 1)
        mov_coords = numpy.concatenate((crds, _ones), 1)
        newcoords = numpy.dot(mov_coords, matrix)
        nc = newcoords[:, :3].astype('f')
        for i in range(lenCoords):
            mol.allAtoms[i]._coords[0] = nc[i].tolist()
    else:
        # Without -u/-a, apply a 180-degree rotation about the chosen axis
        # by negating the two perpendicular coordinates.
        if rotation == 'z':
            # for rotation around z-axis:
            for a in mol.allAtoms:
                a._coords[0][0] = -1.*a._coords[0][0]
                a._coords[0][1] = -1.*a._coords[0][1]
        elif rotation == 'y':
            # for rotation around y-axis:
            for a in mol.allAtoms:
                a._coords[0][0] = -1.*a._coords[0][0]
                a._coords[0][2] = -1.*a._coords[0][2]
        elif rotation == 'x':
            # for rotation around x-axis:
            for a in mol.allAtoms:
                a._coords[0][1] = -1.*a._coords[0][1]
                a._coords[0][2] = -1.*a._coords[0][2]

    # Translate back to the original center.
    ncrds = numpy.array(mol.allAtoms.coords)
    ncrds = ncrds + center
    ncrds = ncrds.tolist()
    mol.allAtoms.updateCoords(ncrds)

    # Write the result, copying non-atom lines verbatim and rewriting
    # ATOM/HETATM records with the rotated coordinates.
    if writer:
        outptr = open(outputfilename, 'w')
        liglines = mol.parser.allLines
        ctr = 0
        for l in liglines:
            if l.find("ATOM") != 0 and l.find("HETATM") != 0:
                outptr.write(l)
            else:
                writer.write_atom(outptr, mol.allAtoms[ctr])
                ctr += 1
        outptr.close()
# To execute this command type:
# rotate_molecule.py -f filename [-o outputfilename -u axis -a angle to rotate] -v
| AutoDockTools/Utilities24/rotate_molecule.py | 5,544 | Print helpful, accurate usage statement to stdout.
!/usr/bin/env python$Id: rotate_molecule.py,v 1.2.10.1 2016/02/11 09:24:08 annao Exp $ process command arguments initialize required parameters-f: pdb_filename_stem optional parametersarbitrary axis angle for rotation'f:o:v'rotate the atoms herefor rotation around z-axis:for rotation around y-axis:for rotation around x-axis: To execute this command type: rotate_molecule.py -f filename [-o outputfilename -u axis -a angle to rotate] -v | 489 | en | 0.412342 |
import subprocess
import socket
import tempfile
import redis
import time
import os
import itertools
import sys
# Environment variable pointing to the redis executable
REDIS_PATH_ENVVAR = 'REDIS_PATH'
def get_random_port():
    """Return a TCP port number that was free at the time of the call.

    Binds a temporary socket to port 0, letting the OS assign a free
    ephemeral port, and reads the chosen port back before closing the
    socket. The original version skipped the bind() and relied on the
    Linux-specific implicit bind performed by listen(); on platforms
    without that behavior getsockname() would report port 0.

    Note the inherent race: another process may grab the port between
    this call and the caller's own bind.
    """
    with socket.socket() as sock:
        sock.bind(('', 0))  # port 0 => OS picks a free ephemeral port
        return sock.getsockname()[1]
class DisposableRedis(object):
    """Context manager that runs a throw-away redis-server process.

    Entering the context starts the server (on a random free port unless
    one was supplied) and returns a connected client; exiting terminates
    the server process.
    """

    def __init__(self, port=None, path='redis-server', **extra_args):
        """
        :param port: port number to start the redis server on. Specify none to automatically generate
        :type port: int|None
        :param extra_args: any extra arguments kwargs will be passed to redis server as --key val
        """
        self._port = port
        # this will hold the actual port the redis is listening on. It's equal to `_port` unless `_port` is None
        # in that case `port` is randomly generated
        self.port = None
        # Flatten kwargs into a ['--key', value, ...] argv fragment.
        self.extra_args = list(itertools.chain(
            *(('--%s' % k, v) for k, v in extra_args.items())
        ))
        # The executable path can be overridden via the REDIS_PATH env var.
        self.path = os.getenv(REDIS_PATH_ENVVAR, path)

    def __enter__(self):
        """Start the server, wait until it answers PING, return a client."""
        if self._port is None:
            self.port = get_random_port()
        else:
            self.port = self._port
        # Run with persistence disabled ('--save' '') in a temp directory.
        args = [self.path,
                '--port', str(self.port),
                '--dir', tempfile.gettempdir(),
                '--save', ''] + self.extra_args
        self.process = subprocess.Popen(
            args,
            # cwd=os.getcwd(),
            stdin=subprocess.PIPE,
            stdout=open(os.devnull, 'w')
            # stdout=sys.stdout,
            # env=os.environ.copy()
        )
        # Poll until the server answers a PING; bail out if the process
        # has already exited (bad binary / port clash / config error).
        while True:
            try:
                self.client().ping()
                break
            except redis.ConnectionError:
                self.process.poll()
                if self.process.returncode is not None:
                    raise RuntimeError("Process has exited")
                time.sleep(0.1)
        return self.client()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Terminate the child server; exceptions are not suppressed.
        self.process.terminate()

    def client(self):
        """
        :rtype: redis.StrictRedis
        """
        return redis.StrictRedis(port=self.port, decode_responses=True)
| RAMP/disposableredis/__init__.py | 2,250 | :param port: port number to start the redis server on. Specify none to automatically generate
:type port: int|None
:param extra_args: any extra arguments kwargs will be passed to redis server as --key val
:rtype: redis.StrictRedis
Environment variable pointing to the redis executable this will hold the actual port the redis is listening on. It's equal to `_port` unless `_port` is None in that case `port` is randomly generatedcwd=os.getcwd(), stdout=sys.stdout, env=os.environ.copy() | 512 | en | 0.512678 |
# Copyright (c) 2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of LightSim2grid, LightSim2grid implements a c++ backend targeting the Grid2Op platform.
import unittest
import numpy as np
import pdb
import pandapower.networks as pn
from lightsim2grid_cpp import PandaPowerConverter
class MakeTests(unittest.TestCase):
    """Check PandaPowerConverter against standard pandapower networks.

    For each reference case (6, 30 and 118 buses) the per-unit line
    parameters (and, for case 118, the transformer parameters) produced
    by the converter are compared to hard-coded reference arrays.
    """

    def setUp(self):
        # Fresh converter per test; comparisons use an absolute tolerance.
        self.converter = PandaPowerConverter()
        self.tol = 1e-8

    def assert_equal(self, tmp, ref):
        """Assert element-wise equality of two arrays within self.tol."""
        assert np.max(np.abs(tmp - ref)) <= self.tol
        assert np.sum(np.abs(tmp - ref)) <= tmp.shape[0] * self.tol

    def test_case6_data(self):
        """Line parameters of the 6-bus Wood & Wollenberg case."""
        net = pn.case6ww()
        self.converter.set_sn_mva(net.sn_mva)  # TODO raise an error if not set !
        self.converter.set_f_hz(net.f_hz)
        # Per-km quantities are scaled by line length before conversion;
        # from/to bus voltages give the per-unit bases.
        line_r, line_x, line_h = self.converter.get_line_param(
            net.line["r_ohm_per_km"].values * net.line["length_km"].values,
            net.line["x_ohm_per_km"].values * net.line["length_km"].values,
            net.line["c_nf_per_km"].values * net.line["length_km"].values,
            net.line["g_us_per_km"].values * net.line["length_km"].values,
            net.bus.loc[net.line["from_bus"]]["vn_kv"],
            net.bus.loc[net.line["to_bus"]]["vn_kv"]
        )
        res_r = np.array([0.001, 0.0005, 0.001, 0.0008, 0.0005, 0.0005, 0.001, 0.0007, 0.0012, 0.0002, 0.002])
        res_x = np.array([0.002, 0.002, 0.003, 0.003, 0.0025, 0.001, 0.003, 0.002, 0.0026, 0.001, 0.004])
        res_h = np.array([4.+0.j, 4.+0.j, 6.+0.j, 6.+0.j, 6.+0.j, 2.+0.j, 4.+0.j, 5.+0.j, 5.+0.j, 2.+0.j, 8.+0.j])
        self.assert_equal(line_r, res_r)
        self.assert_equal(line_x, res_x)
        self.assert_equal(line_h, res_h)

    def test_case30_data(self):
        """Line parameters of the 30-bus case."""
        net = pn.case30()
        self.converter.set_sn_mva(net.sn_mva)  # TODO raise an error if not set !
        self.converter.set_f_hz(net.f_hz)
        line_r, line_x, line_h = self.converter.get_line_param(
            net.line["r_ohm_per_km"].values * net.line["length_km"].values,
            net.line["x_ohm_per_km"].values * net.line["length_km"].values,
            net.line["c_nf_per_km"].values * net.line["length_km"].values,
            net.line["g_us_per_km"].values * net.line["length_km"].values,
            net.bus.loc[net.line["from_bus"]]["vn_kv"],
            net.bus.loc[net.line["to_bus"]]["vn_kv"]
        )
        res_r = np.array([0.0002, 0.0005, 0., 0., 0., 0., 0., 0.,
                          0.0012, 0.0007, 0.0009, 0.0022, 0.0006, 0.0008, 0.0011, 0.0006,
                          0.0003, 0.0009, 0.0003, 0.0003, 0.0007, 0.0001, 0.001, 0.0001,
                          0.0012, 0.0013, 0.0019, 0.0025, 0.0011, 0., 0.0022, 0.0032,
                          0.0024, 0.0006, 0.0005, 0.0002, 0.0006, 0.0001, 0.0005, 0.0003,
                          0.0001])
        res_x = np.array([0.0006, 0.0019, 0.0021, 0.0056, 0.0021, 0.0011, 0.0026, 0.0014,
                          0.0026, 0.0013, 0.002, 0.002, 0.0017, 0.0019, 0.0022, 0.0013,
                          0.0007, 0.0021, 0.0008, 0.0007, 0.0015, 0.0002, 0.002, 0.0004,
                          0.0018, 0.0027, 0.0033, 0.0038, 0.0021, 0.004, 0.0042, 0.006,
                          0.0045, 0.002, 0.002, 0.0006, 0.0018, 0.0004, 0.0012, 0.0008,
                          0.0004])
        res_h = np.array([3.+0.j, 2.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
                          0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 2.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
                          0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
                          0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
                          0.+0.j, 2.+0.j, 2.+0.j, 1.+0.j, 2.+0.j, 0.+0.j, 1.+0.j, 1.+0.j,
                          0.+0.j])
        self.assert_equal(line_r, res_r)
        self.assert_equal(line_x, res_x)
        self.assert_equal(line_h, res_h)

    def test_case118_data(self):
        """Line and transformer parameters of the 118-bus case."""
        net = pn.case118()
        self.converter.set_sn_mva(net.sn_mva)  # TODO raise an error if not set !
        self.converter.set_f_hz(net.f_hz)
        line_r, line_x, line_h = self.converter.get_line_param(
            net.line["r_ohm_per_km"].values * net.line["length_km"].values,
            net.line["x_ohm_per_km"].values * net.line["length_km"].values,
            net.line["c_nf_per_km"].values * net.line["length_km"].values,
            net.line["g_us_per_km"].values * net.line["length_km"].values,
            net.bus.loc[net.line["from_bus"]]["vn_kv"],
            net.bus.loc[net.line["to_bus"]]["vn_kv"]
        )
        res_r = np.array([3.030e-04, 1.290e-04, 5.950e-05, 8.820e-05, 4.880e-04, 4.460e-04,
                          8.660e-05, 4.010e-04, 4.280e-04, 4.050e-04, 1.230e-04, 4.440e-04,
                          3.090e-04, 1.870e-04, 6.010e-04, 3.760e-05, 5.460e-05, 1.700e-04,
                          2.940e-04, 1.560e-04, 2.980e-04, 1.120e-04, 6.250e-04, 4.300e-04,
                          4.840e-04, 3.020e-04, 3.500e-04, 2.000e-04, 2.390e-04, 1.390e-04,
                          5.180e-04, 2.380e-04, 2.540e-04, 9.900e-05, 3.930e-04, 8.620e-05,
                          3.870e-04, 2.580e-04, 4.810e-04, 2.230e-04, 1.320e-04, 3.560e-04,
                          1.620e-04, 2.690e-04, 1.830e-04, 2.380e-04, 2.225e-04, 4.540e-04,
                          6.480e-04, 1.780e-04, 1.710e-04, 1.730e-04, 3.970e-04, 1.800e-04,
                          2.770e-04, 1.230e-04, 2.460e-04, 2.150e-04, 1.600e-04, 4.510e-04,
                          4.660e-04, 5.350e-04, 6.050e-04, 9.940e-05, 1.400e-04, 5.300e-04,
                          2.610e-04, 5.300e-04, 7.440e-04, 1.050e-04, 3.906e-04, 2.780e-04,
                          2.200e-04, 2.470e-04, 9.130e-05, 6.150e-04, 1.350e-04, 1.640e-04,
                          2.300e-05, 5.950e-04, 3.290e-04, 1.450e-04, 1.640e-04, 2.120e-04,
                          1.320e-04, 1.760e-05, 4.540e-04, 1.230e-04, 1.119e-04, 2.520e-04,
                          1.200e-04, 1.830e-04, 2.090e-04, 3.420e-04, 1.350e-04, 1.560e-04,
                          2.410e-04, 3.180e-04, 1.913e-04, 2.370e-04, 4.310e-05, 7.990e-05,
                          4.740e-04, 1.080e-04, 3.170e-04, 2.980e-04, 2.290e-04, 1.190e-04,
                          3.800e-04, 7.520e-04, 2.240e-05, 1.100e-04, 4.150e-04, 8.710e-05,
                          2.560e-05, 3.210e-04, 5.930e-04, 4.640e-05, 4.590e-05, 1.840e-04,
                          1.450e-04, 5.550e-04, 4.100e-04, 6.080e-04, 4.130e-04, 2.240e-04,
                          4.000e-04, 3.800e-04, 6.010e-04, 2.440e-05, 1.910e-04, 7.150e-04,
                          7.150e-04, 6.840e-04, 1.790e-04, 2.670e-04, 4.860e-04, 2.030e-04,
                          4.050e-04, 2.630e-04, 2.580e-05, 7.300e-04, 8.690e-04, 1.690e-04,
                          2.750e-05, 4.880e-05, 3.430e-04, 4.740e-04, 3.430e-04, 2.550e-04,
                          5.030e-04, 2.090e-04, 8.250e-04, 8.030e-04, 4.739e-04, 3.170e-04,
                          3.280e-04, 2.640e-05, 1.230e-04, 8.240e-05, 1.720e-05, 9.010e-05,
                          2.030e-04, 2.690e-05, 1.800e-04, 1.800e-04, 4.820e-04, 2.580e-04,
                          2.240e-04, 8.440e-04, 9.850e-04, 3.000e-04, 2.210e-05])
        res_x = np.array([9.990e-04, 4.240e-04, 1.960e-04, 3.550e-04, 1.960e-03, 1.800e-03,
                          4.540e-04, 1.323e-03, 1.410e-03, 1.220e-03, 4.060e-04, 1.480e-03,
                          1.010e-03, 6.160e-04, 1.999e-03, 1.240e-04, 2.440e-04, 4.850e-04,
                          1.050e-03, 7.040e-04, 8.530e-04, 3.665e-04, 1.320e-03, 1.480e-03,
                          1.600e-03, 6.410e-04, 1.230e-03, 1.020e-03, 1.730e-03, 7.120e-04,
                          1.880e-03, 9.970e-04, 8.360e-04, 5.050e-04, 1.581e-03, 3.400e-04,
                          1.272e-03, 8.480e-04, 1.580e-03, 7.320e-04, 4.340e-04, 1.820e-03,
                          5.300e-04, 8.690e-04, 9.340e-04, 1.080e-03, 7.310e-04, 2.060e-03,
                          2.950e-03, 5.800e-04, 5.470e-04, 8.850e-04, 1.790e-03, 8.130e-04,
                          1.262e-03, 5.590e-04, 1.120e-03, 7.070e-04, 5.250e-04, 2.040e-03,
                          1.584e-03, 1.625e-03, 2.290e-03, 3.780e-04, 5.470e-04, 1.830e-03,
                          7.030e-04, 1.830e-03, 2.444e-03, 2.880e-04, 1.813e-03, 7.620e-04,
                          7.550e-04, 6.400e-04, 3.010e-04, 2.030e-03, 6.120e-04, 7.410e-04,
                          1.040e-04, 1.950e-03, 1.400e-03, 4.810e-04, 5.440e-04, 8.340e-04,
                          4.370e-04, 7.980e-05, 1.801e-03, 5.050e-04, 4.930e-04, 1.170e-03,
                          3.940e-04, 8.490e-04, 9.700e-04, 1.590e-03, 4.920e-04, 8.000e-04,
                          1.080e-03, 1.630e-03, 8.550e-04, 9.430e-04, 5.040e-04, 8.600e-04,
                          1.563e-03, 3.310e-04, 1.153e-03, 9.850e-04, 7.550e-04, 5.400e-04,
                          1.244e-03, 2.470e-03, 1.020e-04, 4.970e-04, 1.420e-03, 2.680e-04,
                          9.400e-05, 1.060e-03, 1.680e-03, 5.400e-04, 2.080e-04, 6.050e-04,
                          4.870e-04, 1.830e-03, 1.350e-03, 2.454e-03, 1.681e-03, 9.010e-04,
                          1.356e-03, 1.270e-03, 1.890e-03, 3.050e-04, 6.250e-04, 3.230e-03,
                          3.230e-03, 1.860e-03, 5.050e-04, 7.520e-04, 1.370e-03, 5.880e-04,
                          1.635e-03, 1.220e-03, 3.220e-04, 2.890e-03, 2.910e-03, 7.070e-04,
                          9.550e-05, 1.510e-04, 9.660e-04, 1.340e-03, 9.660e-04, 7.190e-04,
                          2.293e-03, 6.880e-04, 2.510e-03, 2.390e-03, 2.158e-03, 1.450e-03,
                          1.500e-03, 1.350e-04, 5.610e-04, 3.760e-04, 2.000e-04, 9.860e-04,
                          6.820e-04, 3.020e-04, 9.190e-04, 9.190e-04, 2.180e-03, 1.170e-03,
                          1.015e-03, 2.778e-03, 3.240e-03, 1.270e-03, 4.115e-03])
        res_h = np.array([2.54+0.j, 1.082+0.j, 0.502+0.j, 0.878+0.j, 4.88+0.j,
                          4.444+0.j, 1.178+0.j, 3.368+0.j, 3.6+0.j, 12.4+0.j,
                          1.034+0.j, 3.68+0.j, 10.38+0.j, 1.572+0.j, 4.978+0.j,
                          1.264+0.j, 0.648+0.j, 4.72+0.j, 2.28+0.j, 1.87+0.j,
                          8.174+0.j, 3.796+0.j, 2.58+0.j, 3.48+0.j, 4.06+0.j,
                          1.234+0.j, 2.76+0.j, 2.76+0.j, 4.7+0.j, 1.934+0.j,
                          5.28+0.j, 10.6+0.j, 2.14+0.j, 5.48+0.j, 4.14+0.j,
                          0.874+0.j, 3.268+0.j, 2.18+0.j, 4.06+0.j, 1.876+0.j,
                          1.11+0.j, 4.94+0.j, 5.44+0.j, 2.3+0.j, 2.54+0.j,
                          2.86+0.j, 1.876+0.j, 5.46+0.j, 4.72+0.j, 6.04+0.j,
                          1.474+0.j, 2.4+0.j, 4.76+0.j, 2.16+0.j, 3.28+0.j,
                          1.464+0.j, 2.94+0.j, 1.816+0.j, 5.36+0.j, 5.41+0.j,
                          4.07+0.j, 4.08+0.j, 6.2+0.j, 0.986+0.j, 1.434+0.j,
                          4.72+0.j, 1.844+0.j, 4.72+0.j, 6.268+0.j, 0.76+0.j,
                          4.61+0.j, 2.02+0.j, 2.+0.j, 6.2+0.j, 0.768+0.j,
                          5.18+0.j, 1.628+0.j, 1.972+0.j, 0.276+0.j, 5.02+0.j,
                          3.58+0.j, 1.198+0.j, 1.356+0.j, 2.14+0.j, 4.44+0.j,
                          0.21+0.j, 4.66+0.j, 1.298+0.j, 1.142+0.j, 2.98+0.j,
                          1.01+0.j, 2.16+0.j, 2.46+0.j, 4.04+0.j, 4.98+0.j,
                          8.64+0.j, 2.84+0.j, 17.64+0.j, 2.16+0.j, 2.38+0.j,
                          51.4+0.j, 90.8+0.j, 3.99+0.j, 0.83+0.j, 11.73+0.j,
                          2.51+0.j, 1.926+0.j, 1.426+0.j, 3.194+0.j, 6.32+0.j,
                          0.268+0.j, 1.318+0.j, 3.66+0.j, 0.568+0.j, 0.984+0.j,
                          2.7+0.j, 4.2+0.j, 42.2+0.j, 0.55+0.j, 1.552+0.j,
                          1.222+0.j, 4.66+0.j, 3.44+0.j, 6.068+0.j, 4.226+0.j,
                          2.24+0.j, 3.32+0.j, 3.16+0.j, 4.72+0.j, 116.2+0.j,
                          1.604+0.j, 8.6+0.j, 8.6+0.j, 4.44+0.j, 1.258+0.j,
                          1.874+0.j, 3.42+0.j, 1.396+0.j, 4.058+0.j, 3.1+0.j,
                          123.+0.j, 7.38+0.j, 7.3+0.j, 2.02+0.j, 0.732+0.j,
                          0.374+0.j, 2.42+0.j, 3.32+0.j, 2.42+0.j, 1.788+0.j,
                          5.98+0.j, 1.748+0.j, 5.69+0.j, 5.36+0.j, 5.646+0.j,
                          3.76+0.j, 3.88+0.j, 1.456+0.j, 1.468+0.j, 0.98+0.j,
                          21.6+0.j, 104.6+0.j, 1.738+0.j, 38.+0.j, 2.48+0.j,
                          2.48+0.j, 5.78+0.j, 3.1+0.j, 2.682+0.j, 7.092+0.j,
                          8.28+0.j, 12.2+0.j, 10.198+0.j])
        self.assert_equal(line_r, res_r)
        self.assert_equal(line_x, res_x)
        self.assert_equal(line_h, res_h)
        pp_net = net
        # fix the missing values: pandapower leaves NaN for trafos without
        # a tap changer, which the converter cannot digest directly.
        tap_step_pct = 1.0 * pp_net.trafo["tap_step_percent"].values
        tap_step_pct[~np.isfinite(tap_step_pct)] = 0.
        tap_pos = 1.0 * pp_net.trafo["tap_pos"].values
        tap_pos[~np.isfinite(tap_pos)] = 0.
        is_tap_hv_side = pp_net.trafo["tap_side"].values == "hv"
        is_tap_hv_side[~np.isfinite(is_tap_hv_side)] = True
        tap_angles_ = 1.0 * pp_net.trafo["tap_step_degree"].values
        tap_angles_[~np.isfinite(tap_angles_)] = 0.
        tap_angles_ = np.deg2rad(tap_angles_)
        trafo_r, trafo_x, trafo_b = self.converter.get_trafo_param(tap_step_pct,
                                                                   tap_pos,
                                                                   tap_angles_,  # in radian !
                                                                   is_tap_hv_side,
                                                                   pp_net.bus.loc[pp_net.trafo["hv_bus"]]["vn_kv"],
                                                                   pp_net.bus.loc[pp_net.trafo["lv_bus"]]["vn_kv"],
                                                                   pp_net.trafo["vk_percent"].values,
                                                                   pp_net.trafo["vkr_percent"].values,
                                                                   pp_net.trafo["sn_mva"].values,
                                                                   pp_net.trafo["pfe_kw"].values,
                                                                   pp_net.trafo["i0_percent"].values,
                                                                   )
        trafo_r_res = np.array([0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.81494977e-04,
                                3.39887086e-06, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                                0.00000000e+00, 0.00000000e+00, 1.37295648e-05, 0.00000000e+00,
                                1.73571860e-05])
        trafo_x_res = np.array([2.67000000e-04, 3.82000000e-04, 3.70000000e-04, 2.06930358e-03,
                                4.04933224e-05, 3.88000000e-04, 3.75000000e-04, 3.86000000e-04,
                                2.68000000e-04, 3.70000000e-04, 1.59594718e-04, 3.70000000e-04,
                                2.01181945e-04])
        trafo_h_res = np.array([0.-0.j, 0.-0.j,
                                0.-0.j, 4.4602909-0.00140652j,
                                16.40272367-0.00022869j, 0.-0.j,
                                0.-0.j, 0.-0.j,
                                0.-0.j, 0.-0.j,
                                63.96323106-0.01411497j, 0.-0.j,
                                81.1310369-0.02879733j])
        self.assert_equal(trafo_r, trafo_r_res)
        self.assert_equal(trafo_x, trafo_x_res)
        self.assert_equal(trafo_b, trafo_h_res)
if __name__ == "__main__":
    # Allow running this test module directly: python test_DataConverter.py
    unittest.main()
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .video_record import VideoRecord
from datetime import timedelta
import time
def timestamp_to_sec(timestamp):
    """Convert an ``HH:MM:SS.ff`` timestamp string to seconds as a float.

    The original implementation divided the fractional digits by a hard-coded
    100, which is only correct when the fraction has exactly two digits.
    This version parses the fraction as a decimal, so ``'.89'`` -> 0.89 and
    ``'.890'`` -> 0.89 as well; two-digit inputs keep their old values.

    :param timestamp: string in ``%H:%M:%S.%f`` format (fraction required).
    :return: total number of seconds, including the fractional part.
    :raises ValueError: if ``timestamp`` does not match the expected format.
    """
    # strptime validates the format; struct_time cannot carry the fraction,
    # so the fractional digits are re-parsed from the string below.
    x = time.strptime(timestamp, '%H:%M:%S.%f')
    whole = timedelta(hours=x.tm_hour,
                      minutes=x.tm_min,
                      seconds=x.tm_sec).total_seconds()
    # Interpret the digits after the dot as a decimal fraction of a second,
    # independent of how many digits were written.
    frac = float('0.' + timestamp.split('.')[-1])
    return whole + frac
class EpicKitchensVideoRecord(VideoRecord):
    """One annotated action segment from an EPIC-Kitchens annotation table row."""

    def __init__(self, tup):
        index, row = tup
        self._index = str(index)
        self._series = row

    @property
    def participant(self):
        return self._series['participant_id']

    @property
    def untrimmed_video_name(self):
        return self._series['video_id']

    @property
    def start_frame(self):
        start_sec = timestamp_to_sec(self._series['start_timestamp'])
        return int(round(start_sec * self.fps))

    @property
    def end_frame(self):
        stop_sec = timestamp_to_sec(self._series['stop_timestamp'])
        return int(round(stop_sec * self.fps))

    @property
    def fps(self):
        # A 3-character second token in the video id marks an EPIC-Kitchens-100
        # recording (50 fps); otherwise assume the original 55 set (60 fps).
        suffix = self.untrimmed_video_name.split('_')[1]
        return 50 if len(suffix) == 3 else 60

    @property
    def num_frames(self):
        return self.end_frame - self.start_frame

    @property
    def label(self):
        # Missing class columns (e.g. on an unlabeled test split) map to -1.
        labels = {}
        for task, column in (('verb', 'verb_class'), ('noun', 'noun_class')):
            labels[task] = self._series[column] if column in self._series else -1
        return labels

    @property
    def metadata(self):
        return {'narration_id': self._index}
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
import torch.nn as nn
from opacus import PerSampleGradientClipper
from opacus.dp_model_inspector import DPModelInspector
from opacus.layers import DPLSTM, DPMultiheadAttention, SequenceBias
from opacus.utils.clipping import ConstantFlatClipper
class LayersGradTest(unittest.TestCase):
    """Checks that per-sample gradients computed through opacus match vanilla
    autograd gradients for each supported layer type, under both 'mean' and
    'sum' loss reductions."""

    def setUp(self):
        self.validator = DPModelInspector()

    def _reset_seeds(self):
        # Re-seed so the vanilla run and the DP run see identical randomness
        # (dropout masks etc.) and are therefore comparable.
        torch.manual_seed(1337)
        torch.cuda.manual_seed(1337)

    def _run_once(self, layer, criterion, *args):
        """Run one forward/backward pass of ``layer`` against a zero target."""
        self._reset_seeds()
        layer.zero_grad()
        output = layer(*args)
        if isinstance(output, tuple):
            # e.g. LSTM / attention return (output, extra); keep the output.
            output = output[0]
        output = output.squeeze()
        y = torch.zeros_like(output)
        loss = criterion(output, y)
        loss.backward()

    def _check_one_layer(self, layer, *args, **kwargs):
        # Exercise both reduction modes, since the clipper rescales
        # per-sample gradients differently for each.
        self._check_one_layer_with_criterion(
            layer, nn.L1Loss(reduction="mean"), *args, **kwargs
        )
        self._check_one_layer_with_criterion(
            layer, nn.L1Loss(reduction="sum"), *args, **kwargs
        )

    def _check_one_layer_with_criterion(self, layer, criterion, *args, **kwargs):
        """Assert that DP-clipped gradients (with an effectively infinite clip
        threshold) reproduce the vanilla gradients for ``layer``."""
        self.validator.validate(layer)
        for name, param in layer.named_parameters():
            if ("weight" in name) or ("bias" in name):
                nn.init.uniform_(param, -1.0, 1.0)

        # run without DP
        self._run_once(layer, criterion, *args)
        vanilla_run_grads = [
            (name, p.grad.detach())
            for (name, p) in layer.named_parameters()
            if p.requires_grad
        ]

        # run with DP; the 1e9 threshold makes clipping a no-op so the
        # aggregated per-sample gradients should equal the vanilla ones.
        clipper = PerSampleGradientClipper(
            layer,
            ConstantFlatClipper(1e9),
            batch_first=kwargs.get("batch_first", True),
            loss_reduction=criterion.reduction,
        )
        self._run_once(layer, criterion, *args)
        for param_name, param in layer.named_parameters():
            if param.requires_grad:
                self.assertTrue(
                    hasattr(param, "grad_sample"),
                    f"Per-sample gradients haven't been computed for {param_name}",
                )
        clipper.clip_and_accumulate()
        clipper.pre_step()
        private_run_grads = [
            (name, p.grad.detach())
            for (name, p) in layer.named_parameters()
            if p.requires_grad
        ]

        # compare
        for (vanilla_name, vanilla_grad), (private_name, private_grad) in zip(
            vanilla_run_grads, private_run_grads
        ):
            assert vanilla_name == private_name
            self.assertTrue(
                torch.allclose(vanilla_grad, private_grad, atol=10e-5, rtol=10e-3),
                f"Gradient mismatch. Parameter: {layer}.{vanilla_name}, loss: {criterion.reduction}",
            )

        clipper.close()

    def test_conv1d(self):
        x = torch.randn(64, 16, 24)
        layer = nn.Conv1d(16, 32, 3, 1)
        self._check_one_layer(layer, x)

    def test_conv2d(self):
        x = torch.randn(64, 16, 24, 24)
        layer = nn.Conv2d(16, 32, 3, 1)
        self._check_one_layer(layer, x)

    def test_linear(self):
        # Both 2-d (batch, features) and 3-d (batch, seq, features) inputs.
        self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8))
        self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8, 8))

    def test_layernorm(self):
        # Normalize over progressively larger trailing shapes.
        x = torch.randn(64, 16, 24, 24)
        self._check_one_layer(nn.LayerNorm(24), x)
        self._check_one_layer(nn.LayerNorm((24, 24)), x)
        self._check_one_layer(nn.LayerNorm((16, 24, 24)), x)

    def test_groupnorm(self):
        self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10))
        self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9))
        self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9, 8))

    def test_instancenorm(self):
        self._check_one_layer(
            nn.InstanceNorm1d(16, affine=True), torch.randn(64, 16, 10)
        )
        self._check_one_layer(
            nn.InstanceNorm2d(16, affine=True), torch.randn(64, 16, 10, 9)
        )
        self._check_one_layer(
            nn.InstanceNorm3d(16, affine=True), torch.randn(64, 16, 10, 9, 8)
        )

    def test_sequence_bias(self):
        x = torch.randn(4, 3, 2)
        layer = SequenceBias(2)
        self._check_one_layer(layer, x, batch_first=False)

    def test_multihead_attention(self):
        # Sweep attention configurations: bias, bias_kv, zero_attn, dropout.
        x = torch.randn(16, 24, 32)
        layer = DPMultiheadAttention(32, 1)
        self._check_one_layer(layer, x, x, x, batch_first=False)
        layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True, dropout=0.05)
        self._check_one_layer(layer, x, x, x, batch_first=False)
        layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True)
        self._check_one_layer(layer, x, x, x, batch_first=False)
        layer = DPMultiheadAttention(
            32, 1, bias=True, add_bias_kv=True, add_zero_attn=True
        )
        self._check_one_layer(layer, x, x, x, batch_first=False)
        # Separate key/value embedding dimensions (kdim/vdim).
        q = torch.randn(16, 24, 32)
        k = torch.randn(20, 24, 28)
        v = torch.randn(20, 24, 28)
        layer = DPMultiheadAttention(
            32, 1, bias=True, add_bias_kv=True, add_zero_attn=True, kdim=28, vdim=28
        )
        self._check_one_layer(layer, q, k, v, batch_first=False)

    def test_embedding(self):
        layer = nn.Embedding(256, 100)
        x1 = torch.randint(0, 255, (128, 42)).long()
        x2 = torch.randint(0, 255, (64,)).long()
        self._check_one_layer(layer, x1)
        self._check_one_layer(layer, x2)

    def test_lstm_batch_first(self):
        # input size : 25 output size : 12 minibatch : 30 sequence length : 20
        # Test batch_first=True case
        layer = DPLSTM(25, 12, 1, batch_first=True)
        x = torch.randn(30, 20, 25)
        self._check_one_layer(layer, x, batch_first=True)

    def test_lstm_batch_second(self):
        # input size : 25 output size : 12 minibatch : 30 sequence length : 20
        # Test batch_first=False case
        layer = DPLSTM(25, 12, 1, batch_first=False)
        x = torch.randn(20, 30, 25)
        self._check_one_layer(layer, x, batch_first=False)
| DP_FL_recreate/opacus/tests/layers_grad_test.py | 6,316 | !/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved run without DP run with DP compare input size : 25 output size : 12 minibatch : 30 sequence length : 20 Test batch_first=True case input size : 25 output size : 12 minibatch : 30 sequence length : 20 Test batch_first=False case | 318 | en | 0.499042 |
import os
import shutil
import yaml
from mock import patch
from dsl_parser.exceptions import DSLParsingLogicException
from .. import cfy
from ... import env
from ...config import config
from ...commands import init
from .test_base import CliCommandTest
from .constants import BLUEPRINTS_DIR, SAMPLE_INPUTS_PATH, \
DEFAULT_BLUEPRINT_FILE_NAME, SAMPLE_CUSTOM_NAME_ARCHIVE
class InitTest(CliCommandTest):
    """Tests for the ``cfy init`` command: environment initialization,
    reset flags, blueprint/inputs handling and config file creation."""

    def test_init_initialized_directory(self):
        """A second init of an already-initialized environment is rejected."""
        self.use_manager()
        self.invoke(
            'cfy init',
            err_str_segment='Environment is already initialized')

    def test_init_overwrite(self):
        # Config values shouldn't change between init resets
        with open(config.CLOUDIFY_CONFIG_PATH) as f:
            conf = yaml.safe_load(f.read())
        self.assertFalse(conf['colors'])
        with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
            conf['colors'] = True
            f.write(yaml.safe_dump(conf))
        cfy.invoke('init -r')
        with open(config.CLOUDIFY_CONFIG_PATH) as f:
            conf = yaml.safe_load(f.read())
        self.assertTrue(conf['colors'])

    def test_init_overwrite_hard(self):
        # Config values should change between hard init resets
        with open(config.CLOUDIFY_CONFIG_PATH) as f:
            conf = yaml.safe_load(f.read())
        self.assertFalse(conf['colors'])
        with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
            conf['colors'] = True
            f.write(yaml.safe_dump(conf))
        self.invoke('cfy init -r --hard')
        with open(config.CLOUDIFY_CONFIG_PATH) as f:
            conf = yaml.safe_load(f.read())
        self.assertFalse(conf['colors'])

    def test_init_overwrite_on_initial_init(self):
        # Simply verifying the overwrite flag doesn't break the first init
        cfy.purge_dot_cloudify()
        self.invoke('cfy init -r')

    def test_init_invalid_blueprint_path(self):
        """Init with a nonexistent blueprint path fails with a clear error."""
        self.invoke(
            'cfy init idonotexist.yaml',
            err_str_segment='You must provide either a path to a local file',
        )

    def test_init_default_outputs(self):
        """Outputs of a locally-initialized blueprint use declared defaults."""
        blueprint_path = os.path.join(
            BLUEPRINTS_DIR,
            'local',
            DEFAULT_BLUEPRINT_FILE_NAME
        )
        self.invoke('cfy init {0}'.format(blueprint_path))
        cfy.register_commands()

        output = self.invoke(
            'cfy deployments outputs -b local').logs.split('\n')
        self.assertIn(' "key1": "default_val1", ', output)
        self.assertIn(' "key2": "default_val2", ', output)
        self.assertIn(' "key3": "default_val3", ', output)
        self.assertIn(' "param": null, ', output)
        self.assertIn(' "custom_param": null, ', output)
        self.assertIn(' "provider_context": null', output)

    def test_init_default_inputs(self):
        """Inputs of a locally-initialized blueprint use declared defaults."""
        blueprint_path = os.path.join(
            BLUEPRINTS_DIR,
            'local',
            DEFAULT_BLUEPRINT_FILE_NAME
        )
        command = 'cfy init {0}'.format(blueprint_path)

        self.invoke(command)
        cfy.register_commands()

        output = self.invoke(
            'cfy deployments inputs -b local').logs.split('\n')
        self.assertIn(' "key1": "default_val1", ', output)
        self.assertIn(' "key2": "default_val2", ', output)
        self.assertIn(' "key3": "default_val3"', output)

    def test_init_with_inputs(self):
        """Inputs supplied via file (-i path) and inline (-i key=val) win."""
        blueprint_path = os.path.join(
            BLUEPRINTS_DIR,
            'local',
            DEFAULT_BLUEPRINT_FILE_NAME
        )
        command = 'cfy init {0} -i {1} -i key3=val3'.format(
            blueprint_path,
            SAMPLE_INPUTS_PATH
        )

        self.invoke(command)
        cfy.register_commands()

        output = self.invoke(
            'cfy deployments inputs -b local').logs.split('\n')
        self.assertIn(' "key1": "val1", ', output)
        self.assertIn(' "key2": "val2", ', output)
        self.assertIn(' "key3": "val3"', output)

    def test_init_validate_definitions_version_false(self):
        """With validation disabled, an unsupported DSL feature is tolerated."""
        with open(config.CLOUDIFY_CONFIG_PATH) as f:
            conf = yaml.safe_load(f.read())
        with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
            conf['validate_definitions_version'] = False
            f.write(yaml.safe_dump(conf))
        self.invoke(
            'cfy init {0}/local/blueprint_validate_definitions_version.yaml'
            .format(BLUEPRINTS_DIR)
        )

    def test_init_validate_definitions_version_true(self):
        """With validation enabled (default), the same blueprint fails."""
        self.invoke(
            'cfy init {0}/local/blueprint_validate_definitions_version.yaml'
            .format(BLUEPRINTS_DIR),
            err_str_segment='description not supported in version',
            exception=DSLParsingLogicException
        )

    @patch('cloudify.workflows.local.init_env')
    @patch('cloudify_cli.local._install_plugins')
    def test_init_install_plugins(self, install_plugins_mock, *_):
        """--install-plugins triggers plugin installation for the blueprint."""
        blueprint_path = os.path.join(
            BLUEPRINTS_DIR,
            'local',
            'blueprint_with_plugins.yaml'
        )
        command = 'cfy init {0} --install-plugins'.format(blueprint_path)

        self.invoke(command)
        install_plugins_mock.assert_called_with(blueprint_path=blueprint_path)

    @patch('cloudify.workflows.local.init_env')
    def test_init_with_empty_requirements(self, *_):
        """--install-plugins is a no-op for a blueprint with no plugins."""
        blueprint_path = os.path.join(
            BLUEPRINTS_DIR,
            'local',
            'blueprint_without_plugins.yaml'
        )
        command = 'cfy init {0} --install-plugins'.format(blueprint_path)

        self.invoke(command)

    def test_init_missing_plugins(self):
        # TODO: put back possible solutions
        blueprint_path = os.path.join(
            BLUEPRINTS_DIR,
            'local',
            'blueprint_with_plugins.yaml'
        )

        self.invoke(
            'cfy init {0}'.format(blueprint_path),
            err_str_segment='mapping error: No module named tasks',
            exception=ImportError
        )

    def test_no_init(self):
        # make sure no error is thrown
        cfy.purge_dot_cloudify()
        self.invoke('cfy profiles list')

    def test_init_blueprint_archive_default_name(self):
        """An archive without the default blueprint filename must fail."""
        self.invoke(
            'cfy init {0}'.format(SAMPLE_CUSTOM_NAME_ARCHIVE),
            err_str_segment='Could not find `blueprint.yaml`'
        )

    def test_init_blueprint_archive(self):
        """An archive initializes correctly when -n names its blueprint file."""
        self.invoke(
            'cfy init {0} -b local -n simple_blueprint.yaml'
            .format(SAMPLE_CUSTOM_NAME_ARCHIVE)
        )
        cfy.register_commands()

        output = self.invoke(
            'cfy deployments inputs -b local').logs.split('\n')
        self.assertIn(' "key1": "default_val1", ', output)
        self.assertIn(' "key2": "default_val2", ', output)
        self.assertIn(' "key3": "default_val3"', output)

    def test_set_config(self):
        """set_config creates config.yaml in a fresh working directory."""
        shutil.rmtree(env.CLOUDIFY_WORKDIR)
        os.makedirs(env.CLOUDIFY_WORKDIR)
        self.assertFalse(os.path.isfile(
            os.path.join(env.CLOUDIFY_WORKDIR, 'config.yaml')))
        init.set_config()
        self.assertTrue(os.path.isfile(
            os.path.join(env.CLOUDIFY_WORKDIR, 'config.yaml')))
| cloudify_cli/tests/commands/test_init.py | 7,217 | Config values shouldn't change between init resets Config values should change between hard init resets Simply verifying the overwrite flag doesn't break the first init TODO: put back possible solutions make sure no error is thrown | 231 | en | 0.824172 |
import atexit
import logging
import multiprocessing
import gc
import os
from sys import exit
import warnings
import click
import dask
from distributed import Nanny, Worker
from distributed.security import Security
from distributed.cli.utils import check_python_3, install_signal_handlers
from distributed.comm import get_address_host_port
from distributed.preloading import validate_preload_argv
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from toolz import valmap
from tornado.ioloop import IOLoop, TimeoutError
from tornado import gen
# Module-level logger for the dask-worker CLI.
logger = logging.getLogger("distributed.dask_worker")

# click option type for PEM files: the path must exist and is resolved
# to an absolute path before being handed to the command.
pem_file_option_type = click.Path(exists=True, resolve_path=True)
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("scheduler", type=str, required=False)
@click.option(
    "--tls-ca-file",
    type=pem_file_option_type,
    default=None,
    help="CA cert(s) file for TLS (in PEM format)",
)
@click.option(
    "--tls-cert",
    type=pem_file_option_type,
    default=None,
    help="certificate file for TLS (in PEM format)",
)
@click.option(
    "--tls-key",
    type=pem_file_option_type,
    default=None,
    help="private key file for TLS (in PEM format)",
)
@click.option(
    "--worker-port",
    type=int,
    default=0,
    help="Serving computation port, defaults to random",
)
@click.option(
    "--nanny-port", type=int, default=0, help="Serving nanny port, defaults to random"
)
@click.option(
    "--bokeh-port", type=int, default=None, help="Deprecated. See --dashboard-address"
)
@click.option(
    "--dashboard-address",
    type=str,
    default=":0",
    help="Address on which to listen for diagnostics dashboard",
)
@click.option(
    "--dashboard/--no-dashboard",
    "dashboard",
    default=True,
    required=False,
    help="Launch the Dashboard [default: --dashboard]",
)
@click.option(
    "--bokeh/--no-bokeh",
    "bokeh",
    default=None,
    help="Deprecated. See --dashboard/--no-dashboard.",
    required=False,
)
@click.option(
    "--listen-address",
    type=str,
    default=None,
    help="The address to which the worker binds. Example: tcp://0.0.0.0:9000",
)
@click.option(
    "--contact-address",
    type=str,
    default=None,
    help="The address the worker advertises to the scheduler for "
    "communication with it and other workers. "
    "Example: tcp://127.0.0.1:9000",
)
@click.option(
    "--host",
    type=str,
    default=None,
    help="Serving host. Should be an ip address that is"
    " visible to the scheduler and other workers. "
    "See --listen-address and --contact-address if you "
    "need different listen and contact addresses. "
    "See --interface.",
)
@click.option(
    "--interface", type=str, default=None, help="Network interface like 'eth0' or 'ib0'"
)
@click.option(
    "--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
)
@click.option("--nthreads", type=int, default=0, help="Number of threads per process.")
@click.option(
    "--nprocs",
    type=int,
    default=1,
    show_default=True,
    help="Number of worker processes to launch.",
)
@click.option(
    "--name",
    type=str,
    default=None,
    help="A unique name for this worker like 'worker-1'. "
    "If used with --nprocs then the process number "
    "will be appended like name-0, name-1, name-2, ...",
)
@click.option(
    "--memory-limit",
    default="auto",
    show_default=True,
    help="Bytes of memory per process that the worker can use. "
    "This can be an integer (bytes), "
    "float (fraction of total system memory), "
    "string (like 5GB or 5000M), "
    "'auto', or zero for no memory management",
)
@click.option(
    "--reconnect/--no-reconnect",
    default=True,
    help="Reconnect to scheduler if disconnected [default: --reconnect]",
)
@click.option(
    "--nanny/--no-nanny",
    default=True,
    help="Start workers in nanny process for management [default: --nanny]",
)
@click.option("--pid-file", type=str, default="", help="File to write the process PID")
@click.option(
    "--local-directory", default="", type=str, help="Directory to place worker files"
)
@click.option(
    "--resources",
    type=str,
    default="",
    help='Resources for task constraints like "GPU=2 MEM=10e9". '
    "Resources are applied separately to each worker process "
    "(only relevant when starting multiple worker processes with '--nprocs').",
)
@click.option(
    "--scheduler-file",
    type=str,
    default="",
    help="Filename to JSON encoded scheduler information. "
    "Use with dask-scheduler --scheduler-file",
)
@click.option(
    "--death-timeout",
    type=str,
    default=None,
    help="Seconds to wait for a scheduler before closing",
)
@click.option(
    "--dashboard-prefix", type=str, default="", help="Prefix for the dashboard"
)
@click.option(
    "--lifetime",
    type=str,
    default="",
    help="If provided, shut down the worker after this duration.",
)
@click.option(
    "--lifetime-stagger",
    type=str,
    default="0 seconds",
    show_default=True,
    help="Random amount by which to stagger lifetime values",
)
@click.option(
    "--lifetime-restart/--no-lifetime-restart",
    "lifetime_restart",
    default=False,
    show_default=True,
    required=False,
    help="Whether or not to restart the worker after the lifetime lapses. "
    "This assumes that you are using the --lifetime and --nanny keywords",
)
@click.option(
    "--preload",
    type=str,
    multiple=True,
    is_eager=True,
    help="Module that should be loaded by each worker process "
    'like "foo.bar" or "/path/to/foo.py"',
)
@click.argument(
    "preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
@click.version_option()
def main(
    scheduler,
    host,
    worker_port,
    listen_address,
    contact_address,
    nanny_port,
    nthreads,
    nprocs,
    nanny,
    name,
    pid_file,
    resources,
    dashboard,
    bokeh,
    bokeh_port,
    scheduler_file,
    dashboard_prefix,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
    **kwargs
):
    """Entry point for the ``dask-worker`` CLI.

    Validates the flag combinations, then launches ``nprocs`` Worker (or
    Nanny, the default) instances connected to ``scheduler`` and blocks on
    the Tornado event loop until they finish or a signal arrives.
    Unrecognized options are collected in ``**kwargs`` and forwarded to the
    Worker/Nanny constructor.
    """
    # Relax GC thresholds; frequent collections hurt worker throughput.
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    # Map the deprecated --bokeh* flags onto their --dashboard* replacements.
    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port
        )
        # NOTE(review): bokeh_port is an int while --dashboard-address is a
        # string address like ":8787" — presumably downstream accepts a bare
        # port number as well; confirm.
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh

    # Build the TLS security context from whichever cert options were given.
    sec = Security(
        **{
            k: v
            for k, v in [
                ("tls_ca_file", tls_ca_file),
                ("tls_worker_cert", tls_cert),
                ("tls_worker_key", tls_key),
            ]
            if v is not None
        }
    )

    # --- Mutually-exclusive / dependent flag validation -------------------
    if nprocs > 1 and worker_port != 0:
        logger.error(
            "Failed to launch worker. You cannot use the --port argument when nprocs > 1."
        )
        exit(1)

    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1."
        )
        exit(1)

    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given"
        )
        exit(1)

    if nprocs > 1 and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when nprocs > 1."
        )
        exit(1)

    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        exit(1)

    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address, strict=True)

        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        exit(1)

    # With a nanny the given port is the nanny's; the worker picks its own.
    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if not nthreads:
        # NOTE(review): integer division can yield 0 when nprocs exceeds the
        # CPU count — confirm downstream treats 0 as "choose a default".
        nthreads = multiprocessing.cpu_count() // nprocs

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            # Remove the PID file when the interpreter exits.
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    if resources:
        # Parse 'GPU=2 MEM=10e9' (comma- or space-separated) into {str: float}.
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    if nanny:
        kwargs.update({"worker_port": worker_port, "listen_address": listen_address})
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = Worker

    if (
        not scheduler
        and not scheduler_file
        and dask.config.get("scheduler-address", None) is None
    ):
        raise ValueError(
            "Need to provide scheduler address like\n"
            "dask-worker SCHEDULER_ADDRESS:8786"
        )

    # One Nanny/Worker per process slot; names get a -<i> suffix when nprocs > 1.
    nannies = [
        t(
            scheduler,
            scheduler_file=scheduler_file,
            nthreads=nthreads,
            loop=loop,
            resources=resources,
            security=sec,
            contact_address=contact_address,
            host=host,
            port=port,
            dashboard_address=dashboard_address if dashboard else None,
            service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
            name=name if nprocs == 1 or not name else name + "-" + str(i),
            **kwargs
        )
        for i in range(nprocs)
    ]

    @gen.coroutine
    def close_all():
        # Unregister all workers from scheduler
        if nanny:
            yield [n.close(timeout=2) for n in nannies]

    def on_signal(signum):
        # Signal handler: log, then begin a graceful shutdown.
        logger.info("Exiting on signal %d", signum)
        close_all()

    @gen.coroutine
    def run():
        # Start every nanny/worker, then block until they all finish.
        yield nannies
        yield [n.finished() for n in nannies]

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        raise TimeoutError("Timed out starting worker.") from None
    except KeyboardInterrupt:
        pass
    finally:
        logger.info("End worker")
def go():
    """Console-script entry point: verify the interpreter, then run the CLI."""
    check_python_3()
    main()
if __name__ == "__main__":
    # Allow running this module directly: `python dask_worker.py ...`.
    go()
| distributed/cli/dask_worker.py | 11,096 | https://github.com/dask/distributed/issues/1653 we only need this to verify it is getting parsed if contact address is not present we use the listen_address for contact Unregister all workers from scheduler We already log the exception in nanny / worker. Don't do it again. | 273 | en | 0.89385 |
# --- Imports --- #
import torch
import torch.nn.functional as F
# --- Perceptual loss network --- #
class LossNetwork(torch.nn.Module):
    """Perceptual loss: mean MSE between intermediate VGG activations of a
    dehazed image and its ground truth."""

    def __init__(self, vgg_model):
        super(LossNetwork, self).__init__()
        self.vgg_layers = vgg_model
        # VGG submodule indices whose outputs are compared, keyed by the
        # module's name inside ``vgg_model`` (Sequential names are '0','1',...).
        self.layer_name_mapping = {
            '3': "relu1_2",
            '8': "relu2_2",
            '15': "relu3_3"
        }

    def output_features(self, x):
        """Run ``x`` through the VGG layers, collecting selected activations."""
        features = {}
        for name, module in self.vgg_layers._modules.items():
            x = module(x)
            mapped = self.layer_name_mapping.get(name)
            if mapped is not None:
                features[mapped] = x
        return list(features.values())

    def forward(self, dehaze, gt):
        """Return the mean of per-layer MSE losses between feature maps."""
        losses = [
            F.mse_loss(pred, target)
            for pred, target in zip(self.output_features(dehaze),
                                    self.output_features(gt))
        ]
        return sum(losses) / len(losses)
import heapq
import math
import random as rnd
from functools import partial
from .core import Bag
def sample(population, k):
    """Choose ``k`` unique random elements from a bag.

    The input bag is left unchanged; a new bag holding the sample is
    returned.

    Parameters
    ----------
    population: Bag
        Elements to sample.
    k: integer, optional
        Number of elements to sample.

    Examples
    --------
    >>> import dask.bag as db  # doctest: +SKIP
    ... from dask.bag import random
    ...
    ... b = db.from_sequence(range(5), npartitions=2)
    ... list(random.sample(b, 3).compute())
    [1, 3, 5]
    """
    return _sample(population, k, replace=False)
def choices(population, k=1):
    """Return a ``k``-sized list of elements chosen with replacement.

    Parameters
    ----------
    population: Bag
        Elements to sample.
    k: integer, optional
        Number of elements to sample.

    Examples
    --------
    >>> import dask.bag as db  # doctest: +SKIP
    ... from dask.bag import random
    ...
    ... b = db.from_sequence(range(5), npartitions=2)
    ... list(random.choices(b, 3).compute())
    [1, 1, 5]
    """
    return _sample(population, k, replace=True)
def _sample(population, k, replace=False):
    """Shared driver for ``sample``/``choices``: a two-stage bag reduction
    (per-partition draw, then a weighted combine)."""
    map_fn = partial(_sample_map_partitions, k=k, replace=replace)
    reduce_fn = partial(_sample_reduce, k=k, replace=replace)
    return population.reduction(map_fn, reduce_fn, out_type=Bag)
def _sample_map_partitions(population, k, replace):
"""
Map function used on the sample and choices functions.
Parameters
----------
population : list
List of elements to sample.
k : int, optional
Number of elements to sample. Default is 1.
Returns
-------
sample: list
List of sampled elements from the partition.
lx: int
Number of elements on the partition.
k: int
Number of elements to sample.
"""
lx = len(population)
real_k = k if k <= lx else lx
sample_func = rnd.choices if replace else rnd.sample
# because otherwise it raises IndexError:
sampled = [] if real_k == 0 else sample_func(population=population, k=real_k)
return sampled, lx
def _sample_reduce(reduce_iter, k, replace):
"""
Reduce function used on the sample and choice functions.
Parameters
----------
reduce_iter : iterable
Each element is a tuple coming generated by the _sample_map_partitions function.
Returns a sequence of uniformly distributed samples;
"""
ns_ks = []
s = []
n = 0
# unfolding reduce outputs
for i in reduce_iter:
(s_i, n_i) = i
s.extend(s_i)
n += n_i
k_i = len(s_i)
ns_ks.append((n_i, k_i))
if k < 0 or (k > n and not replace):
raise ValueError("Sample larger than population or is negative")
# creating the probability array
p = []
for n_i, k_i in ns_ks:
if k_i > 0:
p_i = n_i / (k_i * n)
p += [p_i] * k_i
sample_func = rnd.choices if replace else _weighted_sampling_without_replacement
return sample_func(population=s, weights=p, k=k)
def _weighted_sampling_without_replacement(population, weights, k):
"""
Source:
Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis
"""
elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))]
return [population[x[1]] for x in heapq.nlargest(k, elt)]
| ServerComponent/venv/Lib/site-packages/dask/bag/random.py | 3,552 | Map function used on the sample and choices functions.
Parameters
----------
population : list
List of elements to sample.
k : int, optional
Number of elements to sample. Default is 1.
Returns
-------
sample: list
List of sampled elements from the partition.
lx: int
Number of elements on the partition.
k: int
Number of elements to sample.
Reduce function used on the sample and choice functions.
Parameters
----------
reduce_iter : iterable
Each element is a tuple generated by the _sample_map_partitions function.
Returns a sequence of uniformly distributed samples.
Source:
Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis
Return a k sized list of elements chosen with replacement.
Parameters
----------
population: Bag
Elements to sample.
k: integer, optional
Number of elements to sample.
Examples
--------
>>> import dask.bag as db # doctest: +SKIP
... from dask.bag import random
...
... b = db.from_sequence(range(5), npartitions=2)
... list(random.choices(b, 3).compute())
[1, 1, 5]
Chooses k unique random elements from a bag.
Returns a new bag containing elements from the population while
leaving the original population unchanged.
Parameters
----------
population: Bag
Elements to sample.
k: integer, optional
Number of elements to sample.
Examples
--------
>>> import dask.bag as db # doctest: +SKIP
... from dask.bag import random
...
... b = db.from_sequence(range(5), npartitions=2)
... list(random.sample(b, 3).compute())
[1, 3, 5]
because otherwise it raises IndexError: unfolding reduce outputs creating the probability array | 1,640 | en | 0.577329 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Smoke test for NVIDIA's NVML bindings: initializes NVML, prints driver /
# GPU / memory information for GPU 0, then shuts NVML down.
# pip install nvidia-ml-py3 --user
import pynvml
try:
    pynvml.nvmlInit()
except pynvml.NVMLError as error:
    print(error)
    # "Driver Not Loaded" — the NVIDIA driver failed to load
    # (not installed, or broken); raised as
    # pynvml.NVMLError_DriverNotLoaded: Driver Not Loaded
    # "Insufficient Permission" — not running with administrator privileges.
    exit()
try:
    print(pynvml.nvmlDeviceGetCount())
except pynvml.NVMLError as error:
    print(error)
print(pynvml.nvmlDeviceGetCount())# total gpu count = 1
print(pynvml.nvmlSystemGetDriverVersion()) # 396.54
GPU_ID = 0
handle = pynvml.nvmlDeviceGetHandleByIndex(GPU_ID)
print(pynvml.nvmlDeviceGetName(handle)) # GeForce GTX 1060
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
MB_SIZE = 1024*1024
print(meminfo.total/MB_SIZE) # 6078 MB
print(meminfo.used/MB_SIZE) # 531 MB
print(meminfo.free/MB_SIZE) # 5546 MB
pynvml.nvmlShutdown()
| setup/nvidia/nvml-test.py | 919 | !/usr/bin/env python -*- coding: utf-8 -*- pip install nvidia-ml-py3 --user Driver Not Loaded 驱动加载失败(没装驱动或者驱动有问题) Insufficent Permission 没有以管理员权限运行 pynvml.NVMLError_DriverNotLoaded: Driver Not Loaded total gpu count = 1 396.54 GeForce GTX 1060 6078 MB 531 MB 5546 MB | 272 | en | 0.346293 |
# Time: O(n * n!/(c_a!*...*c_z!), n is the length of A, B,
# c_a...c_z is the count of each alphabet,
# n = sum(c_a...c_z)
# Space: O(n * n!/(c_a!*...*c_z!)
# 854
# Strings A and B are K-similar (for some non-negative integer K)
# if we can swap the positions of two letters
# in A exactly K times so that the resulting string equals B.
#
# Given two anagrams A and B, return the smallest K for which A and B are
# K-similar.
#
# Example 1:
#
# Input: A = "ab", B = "ba"
# Output: 1
# Example 2:
#
# Input: A = "abc", B = "bca"
# Output: 2
# Example 3:
#
# Input: A = "abac", B = "baca"
# Output: 2
# Example 4:
#
# Input: A = "aabc", B = "abca"
# Output: 2
# Note:
# - 1 <= A.length == B.length <= 20
# - A and B contain only lowercase letters from
# the set {'a', 'b', 'c', 'd', 'e', 'f'}
# Solution Framework:
# The underlying graph of the problem is a graph with 6 nodes 'a', 'b', ..., 'f' and the edges A[i] -> B[i].
# Our goal is for this graph to have only self-edges (edges of the form a -> a.)
# If A = 'ca...' and B = 'ab...', then the first two edges of the underlying graph are c -> a and a -> b;
# and a swap between A[1] and A[0] changes these two edges to the single edge c -> b. Let's call this type
# of operation 'cutting corners'. Intuitively, our optimal swap schedule always increases the # of matches
# (A[i] == B[i]s) for each swap, so cutting corners is the only type of operation we need to consider.
# (This is essentially the happy swap assumption, proved in 765 - Couples Holding Hands)
#
# Now consider 'cycle decomposition' of the underlying graph. [This decomposition (or the # of cycles),
# is not necessarily unique.] Through operations of cutting corners, we'll delete all the (non-self) edges.
# Each cycle of length k requires k-1 operations to delete. Thus, the answer is just the minimum possible
# value of sum(C_k - 1), where C_1,... C_k are the lengths of the cycles in some cycle decomposition of
# the underlying graph. This can be re-written as (# of non-self edges) - (# of cycles).
# Hence, we want to maximize the # of cycles in a cycle decomposition of the underlying graph.
import collections
# Py2/Py3 compatibility shim: make `xrange` resolve under both interpreters.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3
class Solution(object):
    def kSimilarity(self, A, B):
        """
        :type A: str
        :type B: str
        :rtype: int

        BFS over string states. From each state we only consider swaps that
        bring the first mismatched character into place: such a swap is
        always part of some optimal schedule, which cuts the branching
        factor from O(N^2) to O(N).
        """
        def successors(state):
            # Locate the first position where `state` disagrees with B.
            mismatch = 0
            while state[mismatch] == B[mismatch]:
                mismatch += 1
            chars = list(state)
            target = B[mismatch]
            for j in range(mismatch + 1, len(chars)):
                if chars[j] == target:
                    chars[mismatch], chars[j] = chars[j], chars[mismatch]
                    yield "".join(chars)
                    chars[mismatch], chars[j] = chars[j], chars[mismatch]

        frontier = collections.deque([A])
        distance = {A: 0}
        while frontier:
            state = frontier.popleft()
            if state == B:
                return distance[state]
            for nxt in successors(state):
                if nxt not in distance:
                    distance[nxt] = distance[state] + 1
                    frontier.append(nxt)
print(Solution().kSimilarity('abac', 'baca')) | Python/k-similar-strings.py | 3,813 | :type A: str
:type B: str
:rtype: int
Time: O(n * n!/(c_a!*...*c_z!), n is the length of A, B, c_a...c_z is the count of each alphabet, n = sum(c_a...c_z) Space: O(n * n!/(c_a!*...*c_z!) 854 Strings A and B are K-similar (for some non-negative integer K) if we can swap the positions of two letters in A exactly K times so that the resulting string equals B. Given two anagrams A and B, return the smallest K for which A and B are K-similar. Example 1: Input: A = "ab", B = "ba" Output: 1 Example 2: Input: A = "abc", B = "bca" Output: 2 Example 3: Input: A = "abac", B = "baca" Output: 2 Example 4: Input: A = "aabc", B = "abca" Output: 2 Note: - 1 <= A.length == B.length <= 20 - A and B contain only lowercase letters from the set {'a', 'b', 'c', 'd', 'e', 'f'} Solution Framework: The underlying graph of the problem is a graph with 6 nodes 'a', 'b', ..., 'f' and the edges A[i] -> B[i]. Our goal is for this graph to have only self-edges (edges of the form a -> a.) If A = 'ca...' and B = 'ab...', then the first two edges of the underlying graph are c -> a and a -> b; and a swap between A[1] and A[0] changes these two edges to the single edge c -> b. Let's call this type of operation 'cutting corners'. Intuitively, our optimal swap schedule always increases the of matches (A[i] == B[i]s) for each swap, so cutting corners is the only type of operation we need to consider. (This is essentially the happy swap assumption, proved in 765 - Couples Holding Hands) Now consider 'cycle decomposition' of the underlying graph. [This decomposition (or the of cycles), is not necessarily unique.] Through operations of cutting corners, we'll delete all the (non-self) edges. Each cycle of length k requires k-1 operations to delete. Thus, the answer is just the minimum possible value of sum(C_k - 1), where C_1,... C_k are the lengths of the cycles in some cycle decomposition of the underlying graph. This can be re-written as ( of non-self edges) - ( of cycles). 
Hence, we want to maximize the of cycles in a cycle decomposition of the underlying graph. Python 2 Python 3 Perform a regular breadth-first search: the neighbors to each node string S are all the strings reachable with 1 swap to get the first unmatched character in S matched. we can prove that an optimal solution swaps the left-most unmatched character A[i] with an appropriate match A[j] which equals to B[i] (j > i), as this increases of self-edges. Time complexity: This reduces the of "neighbors" of a node (string state) from O(N^2) to O(N): O(N^2): swap any pair of chars in the string, O(N): only swap the first unmatched char. we need a set to remove repeatedness anyway, so put 'steps' together | 2,748 | en | 0.878143 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
from ..utils.scripts import make_path
from ..utils.testing import Checker
from .obs_table import ObservationTable
from .hdu_index_table import HDUIndexTable
from .obs_table import ObservationTableChecker
from .observations import DataStoreObservation, Observations, ObservationChecker
__all__ = ["DataStore"]
log = logging.getLogger(__name__)
class DataStore(object):
    """IACT data store.

    The data selection and access happens using an observation
    and an HDU index file as described at :ref:`gadf:iact-storage`.

    See :gp-extra-notebook:`cta_1dc_introduction` for usage examples.

    Parameters
    ----------
    hdu_table : `~gammapy.data.HDUIndexTable`
        HDU index table
    obs_table : `~gammapy.data.ObservationTable`
        Observation index table

    Examples
    --------
    Here's an example how to create a `DataStore` to access H.E.S.S. data:

    >>> from gammapy.data import DataStore
    >>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')
    >>> data_store.info()
    """

    DEFAULT_HDU_TABLE = "hdu-index.fits.gz"
    """Default HDU table filename."""

    DEFAULT_OBS_TABLE = "obs-index.fits.gz"
    """Default observation table filename."""

    def __init__(self, hdu_table=None, obs_table=None):
        self.hdu_table = hdu_table
        self.obs_table = obs_table

    def __str__(self):
        return self.info(show=False)

    @classmethod
    def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"):
        """Create from a FITS file.

        The FITS file must contain both index files.

        Parameters
        ----------
        filename : str, Path
            FITS filename
        hdu_hdu : str or int
            FITS HDU name or number for the HDU index table
        hdu_obs : str or int
            FITS HDU name or number for the observation index table
        """
        filename = make_path(filename)
        hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits")
        obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits")
        return cls(hdu_table=hdu_table, obs_table=obs_table)

    @classmethod
    def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None):
        """Create from a directory.

        Parameters
        ----------
        base_dir : str, Path
            Base directory of the data files.
        hdu_table_filename : str, Path
            Filename of the HDU index file. May be specified either relative
            to `base_dir` or as an absolute path. If None, the default filename
            will be looked for.
        obs_table_filename : str, Path
            Filename of the observation index file. May be specified either relative
            to `base_dir` or as an absolute path. If None, the default filename
            will be looked for.
        """
        base_dir = make_path(base_dir)

        # Resolve each index file: prefer the path relative to ``base_dir`` if
        # it exists, otherwise keep the (possibly absolute) path as given.
        if hdu_table_filename:
            hdu_table_filename = make_path(hdu_table_filename)
            if (base_dir / hdu_table_filename).exists():
                hdu_table_filename = base_dir / hdu_table_filename
        else:
            hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE

        if obs_table_filename:
            obs_table_filename = make_path(obs_table_filename)
            if (base_dir / obs_table_filename).exists():
                obs_table_filename = base_dir / obs_table_filename
        else:
            obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE

        if not hdu_table_filename.exists():
            raise IOError("File not found: {}".format(hdu_table_filename))
        log.debug("Reading {}".format(hdu_table_filename))
        hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits")
        # Remember the base directory so relative file locations in the index
        # can be resolved later.
        hdu_table.meta["BASE_DIR"] = str(base_dir)

        if not obs_table_filename.exists():
            raise IOError("File not found: {}".format(obs_table_filename))
        log.debug("Reading {}".format(str(obs_table_filename)))
        obs_table = ObservationTable.read(str(obs_table_filename), format="fits")

        return cls(hdu_table=hdu_table, obs_table=obs_table)

    @classmethod
    def from_config(cls, config):
        """Create from a config dict."""
        base_dir = config["base_dir"]
        hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE)
        obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE)

        hdu_table_filename = cls._find_file(hdu_table_filename, base_dir)
        obs_table_filename = cls._find_file(obs_table_filename, base_dir)

        # BUG FIX: this used to call ``cls.from_files``, which is not defined
        # on this class (AttributeError at runtime); ``from_dir`` accepts
        # exactly these arguments.
        return cls.from_dir(
            base_dir=base_dir,
            hdu_table_filename=hdu_table_filename,
            obs_table_filename=obs_table_filename,
        )

    @staticmethod
    def _find_file(filename, base_dir):
        """Find a file at an absolute or relative location.

        - First tries ``Path(filename)``
        - Second tries ``Path(base_dir) / filename``
        - Raises ``OSError`` if both don't exist.
        """
        path1 = make_path(filename)
        path2 = make_path(base_dir) / filename
        if path1.is_file():
            filename = path1
        elif path2.is_file():
            filename = path2
        else:
            raise OSError("File not found at {} or {}".format(path1, path2))
        return filename

    def info(self, show=True):
        """Print some info."""
        s = "Data store:\n"
        s += self.hdu_table.summary()
        s += "\n\n"
        s += self.obs_table.summary()
        if show:
            print(s)
        else:
            return s

    def obs(self, obs_id):
        """Access a given `~gammapy.data.DataStoreObservation`.

        Parameters
        ----------
        obs_id : int
            Observation ID.

        Returns
        -------
        observation : `~gammapy.data.DataStoreObservation`
            Observation container
        """
        return DataStoreObservation(obs_id=int(obs_id), data_store=self)

    def get_observations(self, obs_id, skip_missing=False):
        """Generate a `~gammapy.data.Observations`.

        Parameters
        ----------
        obs_id : list
            Observation IDs.
        skip_missing : bool, optional
            Skip missing observations, default: False

        Returns
        -------
        observations : `~gammapy.data.Observations`
            Container holding a list of `~gammapy.data.DataStoreObservation`
        """
        obs_list = []
        for oid in obs_id:
            try:
                obs = self.obs(oid)
            except ValueError as err:
                if skip_missing:
                    log.warning("Skipping missing obs_id: {!r}".format(oid))
                    continue
                else:
                    raise err
            else:
                obs_list.append(obs)
        return Observations(obs_list)

    def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False):
        """Create a new `~gammapy.data.DataStore` containing a subset of observations.

        Parameters
        ----------
        obs_id : array-like, `~gammapy.data.ObservationTable`
            List of observations to copy
        outdir : str, Path
            Directory for the new store
        hdu_class : list of str
            see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS`
        verbose : bool
            Print copied files
        overwrite : bool
            Overwrite
        """
        # TODO : Does rsync give any benefits here?
        outdir = make_path(outdir)
        if isinstance(obs_id, ObservationTable):
            obs_id = obs_id["OBS_ID"].data

        hdutable = self.hdu_table
        hdutable.add_index("OBS_ID")
        with hdutable.index_mode("discard_on_copy"):
            subhdutable = hdutable.loc[obs_id]
        if hdu_class is not None:
            subhdutable.add_index("HDU_CLASS")
            with subhdutable.index_mode("discard_on_copy"):
                subhdutable = subhdutable.loc[hdu_class]
        subobstable = self.obs_table.select_obs_id(obs_id)

        for idx in range(len(subhdutable)):
            # Changes to the file structure could be made here
            loc = subhdutable.location_info(idx)
            targetdir = outdir / loc.file_dir
            targetdir.mkdir(exist_ok=True, parents=True)
            # NOTE: relies on the POSIX ``cp`` command; ``-n`` gives the
            # no-clobber behaviour when ``overwrite`` is False.
            cmd = ["cp", "-v"] if verbose else ["cp"]
            if not overwrite:
                cmd += ["-n"]
            cmd += [str(loc.path()), str(targetdir)]
            subprocess.call(cmd)

        filename = str(outdir / self.DEFAULT_HDU_TABLE)
        subhdutable.write(filename, format="fits", overwrite=overwrite)

        filename = str(outdir / self.DEFAULT_OBS_TABLE)
        subobstable.write(filename, format="fits", overwrite=overwrite)

    def check(self, checks="all"):
        """Check index tables and data files.

        This is a generator that yields a list of dicts.
        """
        checker = DataStoreChecker(self)
        return checker.run(checks=checks)
class DataStoreChecker(Checker):
    """Check data store.

    Checks data format and a bit about the content.
    """

    CHECKS = {
        "obs_table": "check_obs_table",
        "hdu_table": "check_hdu_table",
        "observations": "check_observations",
        "consistency": "check_consistency",
    }

    def __init__(self, data_store):
        self.data_store = data_store

    def check_obs_table(self):
        """Checks for the observation index table."""
        obs_checker = ObservationTableChecker(self.data_store.obs_table)
        for record in obs_checker.run():
            yield record

    def check_hdu_table(self):
        """Checks for the HDU index table."""
        table = self.data_store.hdu_table
        meta = table.meta
        if meta.get("HDUCLAS1", "") != "INDEX":
            yield {
                "level": "error",
                "hdu": "hdu-index",
                "msg": "Invalid header key. Must have HDUCLAS1=INDEX",
            }
        if meta.get("HDUCLAS2", "") != "HDU":
            yield {
                "level": "error",
                "hdu": "hdu-index",
                "msg": "Invalid header key. Must have HDUCLAS2=HDU",
            }
        # every HDU referenced from the index must be readable on disk
        for row_idx in range(len(table)):
            loc = table.location_info(row_idx)
            try:
                loc.get_hdu()
            except KeyError:
                yield {
                    "level": "error",
                    "msg": "HDU not found: {!r}".format(loc.__dict__),
                }

    def check_consistency(self):
        """Consistency checks between multiple HDUs"""
        # the two index tables must agree on the set of observations
        obs_ids = set(self.data_store.obs_table["OBS_ID"])
        hdu_ids = set(self.data_store.hdu_table["OBS_ID"])
        if obs_ids != hdu_ids:
            yield {
                "level": "error",
                "msg": "Inconsistent OBS_ID in obs and HDU index tables",
            }
        # TODO: obs table and events header should have the same times

    def check_observations(self):
        """Perform some sanity checks for all observations."""
        for obs_id in self.data_store.obs_table["OBS_ID"]:
            obs_checker = ObservationChecker(self.data_store.obs(obs_id))
            for record in obs_checker.run():
                yield record
| gammapy/data/data_store.py | 11,584 | IACT data store.
The data selection and access happens using an observation
and an HDU index file as described at :ref:`gadf:iact-storage`.
See :gp-extra-notebook:`cta_1dc_introduction` for usage examples.
Parameters
----------
hdu_table : `~gammapy.data.HDUIndexTable`
HDU index table
obs_table : `~gammapy.data.ObservationTable`
Observation index table
Examples
--------
Here's an example how to create a `DataStore` to access H.E.S.S. data:
>>> from gammapy.data import DataStore
>>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')
>>> data_store.info()
Check data store.
Checks data format and a bit about the content.
Find a file at an absolute or relative location.
- First tries ``Path(filename)``
- Second tries ``Path(dir) / filename``
- Raises ``OSError`` if both don't exist.
Check index tables and data files.
This is a generator that yields a list of dicts.
Consistency checks between multiple HDUs
Checks for the HDU index table.
Checks for the observation index table.
Perform some sanity checks for all observations.
Create a new `~gammapy.data.DataStore` containing a subset of observations.
Parameters
----------
obs_id : array-like, `~gammapy.data.ObservationTable`
List of observations to copy
outdir : str, Path
Directory for the new store
hdu_class : list of str
see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS`
verbose : bool
Print copied files
overwrite : bool
Overwrite
Create from a config dict.
Create from a directory.
Parameters
----------
base_dir : str, Path
Base directory of the data files.
hdu_table_filename : str, Path
Filename of the HDU index file. May be specified either relative
to `base_dir` or as an absolute path. If None, the default filename
will be looked for.
obs_table_filename : str, Path
Filename of the observation index file. May be specified either relative
to `base_dir` or as an absolute path. If None, the default filename
will be looked for.
Create from a FITS file.
The FITS file must contain both index files.
Parameters
----------
filename : str, Path
FITS filename
hdu_hdu : str or int
FITS HDU name or number for the HDU index table
hdu_obs : str or int
FITS HDU name or number for the observation index table
Generate a `~gammapy.data.Observations`.
Parameters
----------
obs_id : list
Observation IDs.
skip_missing : bool, optional
Skip missing observations, default: False
Returns
-------
observations : `~gammapy.data.Observations`
Container holding a list of `~gammapy.data.DataStoreObservation`
Print some info.
Access a given `~gammapy.data.DataStoreObservation`.
Parameters
----------
obs_id : int
Observation ID.
Returns
-------
observation : `~gammapy.data.DataStoreObservation`
Observation container
Licensed under a 3-clause BSD style license - see LICENSE.rst TODO : Does rsync give any benefits here? Changes to the file structure could be made here Check that all HDU in the data files exist obs and HDU index should have the same OBS_ID TODO: obs table and events header should have the same times | 3,102 | en | 0.551857 |
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from tcms.core.utils import string_to_list
from tcms.core.forms.fields import UserField
from tcms.management.models import Product, Version, Build
from tcms.testplans.models import TestPlan
from tcms.testcases.models import TestCase
# =========== Forms for search/filter ==============
class SearchProductForm(forms.Form):
"""
Includes *only* fields used in search.html b/c
the actual search is now done via JSON RPC.
"""
name_product = forms.CharField(label='Product', max_length=100, required=False) | telemetryPlugin/forms.py | 685 | Includes *only* fields used in search.html b/c
the actual search is now done via JSON RPC.
-*- coding: utf-8 -*- =========== Forms for search/filter ============== | 165 | en | 0.736794 |
# pylint: disable=no-member
import importlib
from logging.config import fileConfig

from alembic import context
from sqlalchemy import engine_from_config
from sqlalchemy import pool

# BUG FIX: ``from playthrough-bot.models import ...`` is a SyntaxError —
# Python identifiers cannot contain hyphens. The package directory is named
# "playthrough-bot", so it has to be loaded by its string name via importlib.
_models = importlib.import_module("playthrough-bot.models")
ModelBase = _models.ModelBase
get_engine = _models.get_engine

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Point Alembic at the same database URL the application itself uses.
config.set_main_option("sqlalchemy.url", str(get_engine().url))

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = ModelBase.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    # Offline mode emits SQL text instead of executing it, so a plain URL
    # (no DBAPI connection) is all the context needs.
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    # SQLite cannot ALTER columns in place, so render migrations in batch
    # mode when the configured URL points at an SQLite database.
    use_batch = config.get_main_option("sqlalchemy.url").startswith("sqlite:")
    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            render_as_batch=use_batch,
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: dispatch on Alembic's offline vs online mode (offline mode is
# presumably selected by the ``--sql`` flag — standard Alembic behaviour).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| src/playthrough-bot/models/alembic/env.py | 2,339 | pylint: disable=no-member this is the Alembic Config object, which provides access to the values within the .ini file in use. Interpret the config file for Python logging. This line sets up loggers basically. add your model's MetaData object here for 'autogenerate' support from myapp import mymodel target_metadata = mymodel.Base.metadata other values from the config, defined by the needs of env.py, can be acquired: my_important_option = config.get_main_option("my_important_option") ... etc. | 495 | en | 0.745483 |
"""
Experimemtal code for trimming primers & polyA tails from high error rate long reads
"""
import os, sys, pdb
from csv import DictWriter
from collections import namedtuple
from multiprocessing import Process
from Bio.Seq import Seq
from Bio import SeqIO
import parasail
ScoreTuple = namedtuple('ScoreTuple', ['score5', 'end5', 'score3', 'end3', 'endA'])
# for ONT using Clontech
#SEQ_5P = 'AAGCAGTGGTATCAACGCAGAGTACATGGGG'
#SEQ_3P_REV = 'GTATCAACGCAGAGTAC'
ISOSEQ_5P = 'GCAATGAAGTCGCAGGGTTGGG'
ISOSEQ_3P = 'GTACTCTGCGTTGATACCACTGCTT'
#SEQ_5P = 'GCAATGAAGTCGCAGGGTTGGGG'
#SEQ_5P = 'CAGGAAACAGCTATGACC'
#SEQ_3P_REV = 'AAGCAGTGGTATCAACGCAGAGTAC'
#SEQ_3P_REV = 'ACTGGCCGTCGTTTTAC'
MINSCORE_5P = 20
MINSCORE_3P = 20
MIN_A_LEN = 20
SCOREMAT = parasail.matrix_create("ACGT", 2, -5)
def trim5p3p_helper(r, seq_5p, seq_3p_rev):
    """
    Search for 5' and 3' in the first and last 100 bp window

    Aligns the 5' primer against the first 100 bp of the read and the
    reverse-complemented 3' primer against the first 100 bp of the
    reverse-complemented read, then looks for a poly-T run (the poly-A tail
    on the reverse strand) directly after the 3' hit.
    """
    head = str(r.seq[:100])
    tail = str(r.reverse_complement().seq[:100])
    hit5 = parasail.sg_qx_trace(head, seq_5p, 3, 1, SCOREMAT)
    hit3 = parasail.sg_qe_db_trace(tail, seq_3p_rev, 3, 1, SCOREMAT)
    # only look for a tail when the 3' primer hit is convincing
    polyA_len = trimA(tail[hit3.end_query + 1:]) if hit3.score >= MINSCORE_3P else None
    # map the 3' hit position (on the reverse strand) back onto the read
    end3 = len(r.seq) - hit3.end_query - 1
    if MIN_A_LEN == 0 or polyA_len is None:
        endA = end3
    else:
        endA = end3 - polyA_len + 1
    return ScoreTuple(score5=hit5.score, end5=hit5.end_query,
                      score3=hit3.score, end3=end3, endA=endA)
def trimA(rev_seq, min_a_len=None):
    """Measure the leading poly-T run of ``rev_seq`` (poly-A on the reverse strand).

    Scans from the start, tolerating a single non-'T' character; the scan
    stops at the second mismatch (which is excluded via the ``i -= 1``).
    NOTE(review): for an unbroken all-'T' string this reports ``len - 1``,
    because the final decrement is applied unconditionally — preserved here
    for backward compatibility.

    Parameters
    ----------
    rev_seq : str
        Reverse-strand sequence directly after the 3' primer hit.
    min_a_len : int, optional
        Minimum run length to call a tail; defaults to the module-level
        ``MIN_A_LEN`` (generalized so callers/tests can override it).

    Returns
    -------
    int or None
        Length of the detected tail, or None if shorter than ``min_a_len``
        (or if ``rev_seq`` is empty).
    """
    if min_a_len is None:
        min_a_len = MIN_A_LEN
    if len(rev_seq) == 0:
        return None
    n_rev_seq = len(rev_seq)
    mismatch = 0
    i = 0
    while mismatch < 2 and i < n_rev_seq:
        if rev_seq[i] != 'T':
            mismatch += 1
        i += 1
    i -= 1
    if i >= min_a_len:
        return i
    return None
def trim5p3p_multithreaded(fastq_filename, output_prefix, seq_5p, seq_3p_rev, chunks):
    """Trim primers/poly-A from ``fastq_filename`` using ``chunks`` worker processes.

    The input is split into roughly equal chunks, each processed by
    :func:`trim5p3p` into per-worker files named ``<output_prefix>.<i>.*``,
    which are then concatenated into ``<output_prefix>.fl.fasta`` /
    ``.fl.clips`` / ``.nfl.fasta`` / ``.csv`` and removed.
    """
    # first figure out how many records there are (4 FASTQ lines per record)
    # BUG FIX: the handles used for counting/parsing were never closed before.
    with open(fastq_filename, 'r') as f:
        num_lines = sum(1 for _ in f)
    num_records = num_lines // 4
    # ceiling division so the remainder goes to the last chunk
    chunk_size = (num_records // chunks) + (num_records % chunks > 0)
    print("{0} has {1} records, {2} per chunk".format(fastq_filename, num_records, chunk_size))

    pools = []
    records = []
    count = 0
    i = 1
    with open(fastq_filename) as handle:
        for r in SeqIO.parse(handle, 'fastq'):
            count += 1
            records.append(r)
            if count >= chunk_size:
                p = Process(target=trim5p3p, args=(records, output_prefix + '.' + str(i), seq_5p, seq_3p_rev))
                p.start()
                print("Starting worker {0}...".format(i))
                pools.append(p)
                records = []
                count = 0
                i += 1
    # final (possibly empty) chunk — workers are numbered 1..i, which the
    # merge loop below relies on
    p = Process(target=trim5p3p, args=(records, output_prefix + '.' + str(i), seq_5p, seq_3p_rev))
    p.start()
    print("Starting worker {0}...".format(i))
    pools.append(p)
    for p in pools:
        p.join()

    # now combine all the per-worker files, then delete them
    # NOTE(review): the merged CSV keeps one header row per worker chunk,
    # matching the original behaviour.
    with open(output_prefix + '.fl.fasta', 'w') as f_FL, \
            open(output_prefix + '.fl.clips', 'w') as f_FL_clips, \
            open(output_prefix + '.nfl.fasta', 'w') as f_nFL, \
            open(output_prefix + '.csv', 'w') as f_csv:
        for j in range(1, i + 1):
            p = output_prefix + '.' + str(j)
            with open(p + '.fl.fasta') as h:
                f_FL.write(h.read())
                print("writing {0} into {1}...".format(h.name, f_FL.name))
            with open(p + '.fl.clips') as h:
                f_FL_clips.write(h.read())
                print("writing {0} into {1}...".format(h.name, f_FL_clips.name))
            with open(p + '.nfl.fasta') as h:
                f_nFL.write(h.read())
                print("writing {0} into {1}...".format(h.name, f_nFL.name))
            with open(p + '.csv') as h:
                f_csv.write(h.read())
                print("writing {0} into {1}...".format(h.name, f_csv.name))
            os.remove(p + '.fl.fasta')
            os.remove(p + '.fl.clips')
            os.remove(p + '.nfl.fasta')
            os.remove(p + '.csv')
def trim5p3p(records, output_prefix, seq_5p, seq_3p_rev):
    """Classify each record as full-length (FL) or not and write trimmed output.

    A record is FL on a strand when both primer hits pass their minimum
    scores and (unless MIN_A_LEN == 0) a poly-A tail was found on that
    strand. FL reads go to ``.fl.fasta`` (trimmed) with their clipped ends in
    ``.fl.clips``; everything else goes to ``.nfl.fasta``, trimmed on the
    better-scoring orientation. Per-read positions are logged to ``.csv``.
    """
    # BUG FIX (robustness): open the four output files via ``with`` so they
    # are flushed and closed even if an alignment raises part-way through.
    with open(output_prefix + '.fl.fasta', 'w') as f_FL, \
            open(output_prefix + '.fl.clips', 'w') as f_FL_clips, \
            open(output_prefix + '.nfl.fasta', 'w') as f_nFL, \
            open(output_prefix + '.csv', 'w') as f_csv:
        writer = DictWriter(f_csv, fieldnames=['id', 'end5', 'end3', 'endA', 'strand'])
        writer.writeheader()
        for r in records:
            r2 = r.reverse_complement()
            r2.id = r.id
            t1 = trim5p3p_helper(r, seq_5p, seq_3p_rev)
            t2 = trim5p3p_helper(r2, seq_5p, seq_3p_rev)
            is_fl_flag1 = t1.score5 >= MINSCORE_5P and t1.score3 >= MINSCORE_3P and (MIN_A_LEN == 0 or t1.endA != t1.end3)
            is_fl_flag2 = t2.score5 >= MINSCORE_5P and t2.score3 >= MINSCORE_3P and (MIN_A_LEN == 0 or t2.endA != t2.end3)
            # orientation choice: if both strands look FL, the higher combined
            # primer score wins
            if is_fl_flag1:
                if is_fl_flag2:
                    if t1.score5 + t1.score3 > t2.score5 + t2.score3:
                        strand = '+'
                    else:
                        strand = '-'
                else:  # pick t1
                    strand = '+'
            elif is_fl_flag2:
                strand = '-'
            else:
                strand = 'NA'
            info = {'id': r.id,
                    'end5': 'NA',
                    'end3': 'NA',
                    'endA': 'NA',
                    'strand': 'NA'}
            if strand == '+':
                info['strand'] = '+'
                info['end5'] = t1.end5
                info['end3'] = t1.end3
                info['endA'] = t1.endA
                f_FL.write(">{0}\n{1}\n".format(r.id, r.seq[t1.end5:t1.endA]))
                f_FL_clips.write(">{0}_5p strand:+ score:{1}\n{2}\n".format(r.id, t1.score5, r.seq[:t1.end5]))
                f_FL_clips.write(">{0}_3p strand:+ score:{1}\n{2}\n".format(r.id, t1.score3, r.seq[t1.endA:]))
            elif strand == '-':
                info['strand'] = '-'
                info['end5'] = t2.end5
                info['end3'] = t2.end3
                info['endA'] = t2.endA
                f_FL.write(">{0}\n{1}\n".format(r2.id, r2.seq[t2.end5:t2.endA]))
                f_FL_clips.write(">{0}_5p strand:- score:{1}\n{2}\n".format(r.id, t2.score5, r2.seq[:t2.end5]))
                f_FL_clips.write(">{0}_3p strand:- score:{1}\n{2}\n".format(r.id, t2.score3, r2.seq[t2.endA:]))
            else:
                # non-FL, but we still want to trim away the primer hits
                if t1.score5 + t1.score3 > t2.score5 + t2.score3:
                    f_nFL.write(">{0} strand:+?\n{1}\n".format(r.id, r.seq[t1.end5:t1.endA]))
                else:
                    f_nFL.write(">{0} strand:-?\n{1}\n".format(r2.id, r2.seq[t2.end5:t2.endA]))
            writer.writerow(info)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("fastq_filename")
parser.add_argument("output_prefix")
parser.add_argument("-p", "--primer_fasta", default=None, help="Primer fasta file (if not given, use IsoSeq defaults)")
parser.add_argument("-n", "--chunks", default=10, type=int, help="Number of chunks (CPUs) to use, default 10")
args = parser.parse_args()
if args.primer_fasta is None:
seq_5p = ISOSEQ_5P
seq_3p = ISOSEQ_3P
print(f"Using Iso-Seq default 5' primer sequence: {seq_5p}")
print(f"Using Iso-Seq default 3' primer sequence: {seq_3p}")
else:
reader = SeqIO.parse(open(args.primer_fasta), 'fasta')
r = next(reader)
if r.seqid!='5p':
print("ERROR: the first entry in {0} should be >5p! Abort!".format(args.primer_fasta))
sys.exit(-1)
seq_5p = str(r.seq)
r = next(reader)
if r.seqid!='3p':
print("ERROR: the second entry in {0} should be >3p! Abort!".format(args.primer_fasta))
sys.exit(-1)
seq_3p = str(r.seq)
print(f"Reading in 5' primer sequence: {seq_5p}")
print(f"Reading in 3' primer sequence: {seq_3p}")
seq_3p_rev = str(Seq(seq_3p).reverse_complement())
trim5p3p_multithreaded(args.fastq_filename, args.output_prefix, seq_5p, seq_3p_rev, args.chunks)
| beta/trim_primers.py | 8,323 | Search for 5' and 3' in the first and last 100 bp window
Experimemtal code for trimming primers & polyA tails from high error rate long reads
for ONT using ClontechSEQ_5P = 'AAGCAGTGGTATCAACGCAGAGTACATGGGG'SEQ_3P_REV = 'GTATCAACGCAGAGTAC'SEQ_5P = 'GCAATGAAGTCGCAGGGTTGGGG'SEQ_5P = 'CAGGAAACAGCTATGACC'SEQ_3P_REV = 'AAGCAGTGGTATCAACGCAGAGTAC'SEQ_3P_REV = 'ACTGGCCGTCGTTTTAC' first figure out how many records there are and record positions now combine all the files pick t1 non-fL, but we still wanna trim away the stuff | 521 | en | 0.716636 |
#!/usr/bin/env python
"""
Tests for ohsome client
"""
import os
import pandas as pd
from nose.tools import raises
import geojson
import geopandas as gpd
import ohsome
@raises(ohsome.OhsomeException)
def test_handle_multiple_responses_throw_timeouterror():
    """
    Tests counting elements within a bounding box for two timestamps
    :return:
    """
    # GIVEN: a count query with a 2 second timeout
    query_bboxes = [8.67066, 49.41423, 8.68177, 49.4204]
    query_time = "2010-01-01/2011-01-01/P1Y"
    query_keys = ["building"]
    query_values = [""]
    # WHEN posting, an OhsomeException must bubble up (see decorator)
    client = ohsome.OhsomeClientParallel()
    response = client.elements.count.post(bboxes=query_bboxes, time=query_time,
                                          keys=query_keys, values=query_values, timeout=2)
    del client
def test_elements_count():
    """
    Tests counting elements within a bounding box for two timestamps
    :return:
    """
    # GIVEN
    query_bboxes = [8.67066, 49.41423, 8.68177, 49.4204]
    query_time = "2010-01-01/2011-01-01/P1Y"
    query_keys = ["building"]
    query_values = [""]
    expected = pd.DataFrame({
        "timestamp": ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"],
        "value": [53.0, 256.0],
    })
    # WHEN
    client = ohsome.OhsomeClient()
    response = client.elements.count.post(bboxes=query_bboxes, time=query_time,
                                          keys=query_keys, values=query_values)
    result = response.as_dataframe()
    del client
    # THEN
    assert expected.equals(result)
def test_elements_count_group_by_key():
    """
    Tests counting elements within a bounding box and grouping them by keys
    :return:
    """
    # GIVEN: counts grouped into "building" vs "remainder"
    query_bboxes = "8.67066,49.41423,8.68177,49.4204"
    query_time = "2010-01-01/2011-01-01/P1Y"
    group_by_keys = ["building"]
    expected = pd.DataFrame({
        "key": ["remainder", "remainder", "building", "building"],
        "timestamp": ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z",
                      "2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"],
        "value": [482.0, 628.0, 53.0, 256.0],
    })
    expected.set_index(["key", "timestamp"], inplace=True)
    # WHEN
    client = ohsome.OhsomeClient()
    response = client.elements.count.groupBy.key.post(bboxes=query_bboxes,
                                                      groupByKeys=group_by_keys, time=query_time)
    results = response.as_dataframe()
    # THEN
    assert expected.equals(results)
def test_elemets_count_ratio():
    """
    Tests count ratio
    :return:
    """
    # NOTE(review): this test never asserts anything — ``expected`` is unused
    # and the response is not inspected, so it only checks that the request
    # does not raise. (The function name also has a typo: "elemets".)
    query_bboxes = "8.67066,49.41423,8.68177,49.4204"
    query_time = "2010-01-01"
    numerator_keys = ["building"]
    denominator_keys = ["addr:city"]
    numerator_values = [""]
    denominator_values = [""]
    expected = 365.0
    client = ohsome.OhsomeClient()
    response = client.elements.count.ratio.post(
        bboxes=query_bboxes,
        time=query_time,
        keys=numerator_keys,
        keys2=denominator_keys,
        values=numerator_values,
        values2=denominator_values,
    )
    #results = response.as_dataframe()
# Cache is disabled
"""
def test_use_cache_dir():
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2018-01-01/P1Y"
keys = ["building"]
values = [""]
cache_dir = "./tmp"
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [53.0, 256.0]
expected = pd.DataFrame({"timestamp": timestamps, "value": counts})
# WHEN
client = ohsome.OhsomeClient(cache_dir=cache_dir)
assert os.path.exists(cache_dir)
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_dataframe()
#del client
"""
@raises(AssertionError)
def test_elements_count_exception():
    """
    Tests whether a TypeError is raised if the result cannot be converted to a geodataframe object
    :return:
    """
    # GIVEN: an aggregation response, which carries no geometries
    query_bboxes = "8.67066,49.41423,8.68177,49.4204"
    query_time = "2010-01-01/2011-01-01/P1Y"
    query_keys = ["building"]
    query_values = [""]
    # WHEN / THEN: converting to a GeoDataFrame must fail (see decorator)
    client = ohsome.OhsomeClient()
    response = client.elements.count.post(bboxes=query_bboxes, time=query_time,
                                          keys=query_keys, values=query_values)
    response.as_geodataframe()
def test_elements_geometry():
    """
    Tests whether the result of an elements/geometry query can be converted to a geodataframe
    :return:
    """
    # GIVEN
    query_bboxes = "8.67066,49.41423,8.68177,49.4204"
    query_time = "2010-01-01"
    query_keys = ["landuse"]
    query_values = ["grass"]
    # WHEN
    client = ohsome.OhsomeClient()
    response = client.elements.geometry.post(bboxes=query_bboxes, time=query_time,
                                             keys=query_keys, values=query_values)
    result = response.as_geodataframe()
    del client
    # THEN
    assert len(result.geometry) == 9
def test_to_file_assert_filetype():
    """
    Asserts whether an error is thrown if the output file is not json or geojson
    :return:
    """
    # NOTE(review): this test is an unfinished stub — it only defines the
    # output path and never calls the code under test nor asserts anything.
    output_file = "./out.shp"
def test_format_coordinates():
    """
    Asserts that coordinates of a MultiPolygon are concerted correctly
    :return:
    """
    # GIVEN: a MultiPolygon feature whose coordinates need reformatting
    feature = {"type": "Feature",
               "geometry": {"coordinates": [[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]],
                                             [[13, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
                            "type": "MultiPolygon"}}
    bpolys = geojson.FeatureCollection([feature])
    query_time = "2018-01-01"
    query_keys = ["landuse"]
    query_values = ["grass"]
    # WHEN: the coordinates are run through ohsome.format_coordinates
    client = ohsome.OhsomeClient()
    response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys),
                                             time=query_time, keys=query_keys, values=query_values)
    result = response.as_geodataframe()
    del client
    # THEN
    assert len(result.geometry) == 74
def test_format_geodataframe():
    # GIVEN: a GeoDataFrame built from one MultiPolygon feature
    feature = {"type": "Feature",
               "properties": {"id": 0},
               "geometry": {"coordinates": [
                   [[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
                   [[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
                   "type": "MultiPolygon"}}
    bpolys_df = gpd.GeoDataFrame().from_features(geojson.FeatureCollection([feature]))
    query_time = "2018-01-01"
    query_keys = ["amenity"]
    query_values = [""]
    response_format = "geojson"
    response_properties = ["tags", "metadata"]
    # WHEN
    client = ohsome.OhsomeClient()
    response = client.elements.count.groupBy.boundary.post(bpolys=bpolys_df, time=query_time,
                                                           keys=query_keys, values=query_values,
                                                           format=response_format,
                                                           properties=response_properties)
    result = response.as_geodataframe()
    del client
    # THEN
    assert result["value"][0] == 538
def test_parallel_user():
    """
    Tests the parallel client: a users/count query grouped by boundary over
    two boundary polygons, with chunksize=1 so each chunk is one feature.
    :return:
    """
    # GIVEN: two MultiPolygon boundary features and a time period
    bpolys = geojson.FeatureCollection([{"type": "Feature",
                                         "properties": {"id": 0},
                                         "geometry": {"coordinates": [
                                             [[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
                                             [[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
                                             "type": "MultiPolygon"}},
                                        {"type": "Feature",
                                         "properties": {"id": 1},
                                         "geometry": {"coordinates": [
                                             [[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
                                             [[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
                                             "type": "MultiPolygon"}}
                                        ])
    bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
    timeperiod = "2017-01-01,2018-01-01"
    keys = ["amenity"]
    values = [""]
    format = "json"
    properties = ["metadata"]
    # WHEN: the parallel client posts the query (one feature per chunk)
    client = ohsome.OhsomeClientParallel(chunksize=1)
    response = client.users.count.groupBy.boundary.post(bpolys=bpolys_df, time=timeperiod, keys=keys, values=values,
                                                        format=format, properties=properties)
    result = response.as_dataframe()
    del client
    # THEN: the user count for the first boundary matches the expected value
    assert result["value"][0] == 33.
| src/ohsome/tests/test_ohsome_client.py | 8,157 | Tests counting elements within a bounding box for two timestamps
:return:
Tests whether a TypeError is raised if the result cannot be converted to a geodataframe object
:return:
Tests counting elements within a bounding box and grouping them by keys
:return:
Tests whether the result of an elements/geometry query can be converted to a geodataframe
:return:
Tests count ratio
:return:
Asserts that coordinates of a MultiPolygon are concerted correctly
:return:
Tests counting elements within a bounding box for two timestamps
:return:
Asserts whether an error is thrown if the output file is not json or geojson
:return:
Tests for ohsome client
!/usr/bin/env python GIVEN WHEN GIVEN WHEN THENGIVEN WHEN THENresults = response.as_dataframe() Cache is disabled GIVEN WHEN GIVEN WHEN THEN GIVEN WHEN THEN GIVEN WHEN THEN GIVEN WHEN THEN | 834 | en | 0.689362 |
import os, sys, glob, argparse
import logging
import types
from collections import OrderedDict
import torch
import torch.nn.functional as F
import utils
import models
import main as entry
# Pin CUDA device enumeration to PCI bus order and restrict this tool to GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def export_onnx(args):
    """Export a model-zoo model (optionally with pretrained weights) to ONNX.

    Loads a checkpoint from ``args.old`` when present, appends a softmax head,
    and writes ``onnx/<model>/<model>.onnx``.

    Parameters
    ----------
    args : argparse.Namespace
        Must carry ``model``, ``old`` (checkpoint path) and ``input_size``.
    """
    model_name = args.model
    if model_name in models.model_zoo:
        model, args = models.get_model(args)
    else:
        print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
        return

    if utils.check_file(args.old):
        print("load pretrained from %s" % args.old)
        if torch.cuda.is_available():
            checkpoint = torch.load(args.old)
        else:  # force cpu mode
            checkpoint = torch.load(args.old, map_location='cpu')
        print("load pretrained ==> last epoch: %d" % checkpoint.get('epoch', 0))
        print("load pretrained ==> last best_acc: %f" % checkpoint.get('best_acc', 0))
        print("load pretrained ==> last learning_rate: %f" % checkpoint.get('learning_rate', 0))
        try:
            utils.load_state_dict(model, checkpoint.get('state_dict', None))
        except RuntimeError:
            print("Loading pretrained model failed")
    else:
        print("no pretrained file exists({}), init model with default initlizer".
              format(args.old))

    # Append an explicit softmax over the class dimension. dim=1 fixes the
    # deprecated implicit-dim behavior of torch.nn.Softmax() (which only
    # warned and guessed the dimension).
    onnx_model = torch.nn.Sequential(OrderedDict([
        ('network', model),
        ('softmax', torch.nn.Softmax(dim=1)),
    ]))

    onnx_path = os.path.join("onnx", model_name)
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(onnx_path, exist_ok=True)
    onnx_save = os.path.join(onnx_path, model_name + '.onnx')

    input_names = ["input"]
    dummy_input = torch.zeros((1, 3, args.input_size, args.input_size))
    output_names = ['prob']
    torch.onnx.export(
        onnx_model,
        dummy_input,
        onnx_save,
        verbose=True,
        input_names=input_names,
        output_names=output_names,
        opset_version=7,
        keep_initializers_as_inputs=True
    )
def inference(args):
    """Run one dummy forward pass, printing every conv layer's configuration.

    Monkey-patches ``custom_conv.__init__`` so quantization is disabled
    (``force_fp`` is forced to True) before the model is built, then rebinds
    every ``nn.Conv2d``'s forward to a logging wrapper.
    """
    from models.quant import custom_conv

    def init(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
             args=None, force_fp=False, feature_stride=1):
        # Bypass custom_conv's own __init__: construct as a plain Conv2d and
        # force full precision regardless of the caller's force_fp flag.
        super(custom_conv, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.args = args
        self.force_fp = True
    custom_conv.__init__ = init

    model_name = args.model
    if model_name in models.model_zoo:
        model, args = models.get_model(args)
    else:
        print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
        return

    def forward(self, x):
        # Log the layer's shapes/config, then run the standard convolution.
        print(x.shape, self.weight.shape, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
        output = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
        return output

    # Rebind forward on every conv instance so the print fires once per layer.
    for m in model.modules():
        if isinstance(m, torch.nn.Conv2d):
            m.forward = types.MethodType(forward, m)

    input = torch.rand(1, 3, args.input_size, args.input_size)
    model.forward(input)
def get_parameter():
    """Extend the main entry parser with tool-specific options and parse argv.

    Returns the parsed namespace with ``verbose_list`` and ``keyword``
    normalized from comma-separated strings into lists of stripped tokens.
    """
    parser = entry.get_parser()
    parser.add_argument('--old', type=str, default='')
    parser.add_argument('--new', type=str, default='')
    parser.add_argument('--mapping_from', '--mf', type=str, default='')
    parser.add_argument('--mapping_to', '--mt', type=str, default='')
    parser.add_argument('--verbose_list', default='ratio,sep', type=str)
    args = parser.parse_args()

    # Both options accept "a,b,c" on the command line; split them here.
    for attr in ('verbose_list', 'keyword'):
        raw_value = getattr(args, attr)
        if isinstance(raw_value, str):
            setattr(args, attr, [token.strip() for token in raw_value.split(',')])
    return args
def main():
    """Command-line multi-tool dispatched by ``--keyword`` flags.

    Supported keywords: export_onnx, inference, verbose, load/save (+raw),
    update (+raw), det-load, swap, sort, verify-data / verify-image.
    """
    args = get_parameter()

    args.weights_dir = os.path.join(args.weights_dir, args.model)
    utils.check_folder(args.weights_dir)

    if os.path.exists(args.log_dir):
        utils.setup_logging(os.path.join(args.log_dir, 'tools.txt'), resume=True)

    # Turn the keyword list into a lookup table of enabled actions.
    config = dict()
    for i in args.keyword:
        config[i] = True

    if 'export_onnx' in config.keys():
        export_onnx(args)

    if 'inference' in config.keys():
        inference(args)

    if 'verbose' in config.keys():
        # Print checkpoint entries matching --verbose_list ('all' prints everything).
        if torch.cuda.is_available():
            checkpoint = torch.load(args.old)
        else:  # force cpu mode
            checkpoint = torch.load(args.old, map_location='cpu')
        # Unwrap common checkpoint containers.
        if 'state_dict' in checkpoint:
            checkpoint = checkpoint['state_dict']
        if 'model' in checkpoint:
            checkpoint = checkpoint['model']
        for name, value in checkpoint.items():
            if ('quant_activation' in name or 'quant_weight' in name) and name.split('.')[-1] in args.verbose_list:
                print(name, value.shape, value.requires_grad)
                print(value.data)
            elif "all" in args.verbose_list:
                if 'num_batches_tracked' not in name:
                    if isinstance(value, torch.Tensor):
                        print(name, value.shape, value.requires_grad)
                    elif isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
                        print(name, value, type(value))
                    else:
                        print(name, type(value))

    if 'load' in config.keys() or 'save' in config.keys():
        # Load a checkpoint into a freshly-built model; optionally re-save a
        # clean state_dict to --new.
        model_name = args.model
        if model_name in models.model_zoo:
            model, args = models.get_model(args)
        else:
            print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
            return
        if utils.check_file(args.old):
            raw = 'raw' in config.keys()  # 'raw': the checkpoint IS the state_dict
            if torch.cuda.is_available():
                checkpoint = torch.load(args.old)
            else:  # force cpu mode
                checkpoint = torch.load(args.old, map_location='cpu')
            try:
                utils.load_state_dict(model, checkpoint.get('state_dict', None) if not raw else checkpoint, verbose=False)
            except RuntimeError:
                print("Loading pretrained model failed")
            # NOTE(review): this prints even when the load above failed.
            print("Loading pretrained model OK")

            if 'save' in config.keys() and args.new != '':
                torch.save(model.state_dict(), args.new)
                print("Save pretrained model into %s" % args.new)
        else:
            print("file not exist %s" % args.old)

    if 'update' in config.keys():
        # Rename checkpoint keys via a mapping_from -> mapping_to name table.
        mapping_from = []
        mapping_to = []
        if os.path.isfile(args.mapping_from):
            with open(args.mapping_from) as f:
                mapping_from = f.readlines()
                f.close()
        if os.path.isfile(args.mapping_to):
            with open(args.mapping_to) as f:
                mapping_to = f.readlines()
                f.close()
        # Strip quotes/newlines; drop blank and commented-out entries.
        mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
        mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
        mapping_to = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_to]
        mapping_to = [ i for i in mapping_to if len(i) > 0 and i[0] != '#']
        if len(mapping_to) != len(mapping_from) or len(mapping_to) == 0 or len(mapping_from) == 0:
            mapping = None
            logging.info('no valid mapping')
        else:
            mapping = dict()
            for i, k in enumerate(mapping_from):
                # "prefix{a, b}" entries expand to one mapping per listed suffix.
                if '{' in k and '}' in k and '{' in mapping_to[i] and '}' in mapping_to[i]:
                    item = k.split('{')
                    for v in item[1].strip('}').split(","):
                        v = v.strip()
                        mapping[item[0] + v] = mapping_to[i].split('{')[0] + v
                else:
                    mapping[k] = mapping_to[i]
        raw = 'raw' in config.keys()
        if not os.path.isfile(args.old):
            args.old = args.pretrained
        utils.import_state_dict(args.old, args.new, mapping, raw, raw_prefix=args.case)

    if 'det-load' in config.keys():
        # Re-save a checkpoint through a detectron2-style DetectionCheckpointer.
        from third_party.checkpoint import DetectionCheckpointer
        model_name = args.model
        if model_name in models.model_zoo:
            model, args = models.get_model(args)
        else:
            print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
            return
        split = os.path.split(args.old)
        checkpointer = DetectionCheckpointer(model, split[0], save_to_disk=True)
        checkpointer.resume_or_load(args.old, resume=True)
        checkpointer.save(split[1])

    if 'swap' in config.keys():
        # Permute mapping entries in strides of `interval` following patterns
        # like "4/0-2-1-3" from --verbose_list; writes "<mapping_from>-swap".
        mapping_from = []
        if os.path.isfile(args.mapping_from):
            with open(args.mapping_from) as f:
                mapping_from = f.readlines()
                f.close()
        mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
        mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
        lists = args.verbose_list
        for i in lists:
            item = i.split('/')
            interval = (int)(item[0])
            index = item[1].split('-')
            index = [(int)(x) for x in index]
            if len(mapping_from) % interval == 0 and len(index) <= interval:
                mapping_to = mapping_from.copy()
                for j, k in enumerate(index):
                    k = k % interval
                    mapping_to[j::interval] = mapping_from[k::interval]
                mapping_to= [ i + '\n' for i in mapping_to]
                with open(args.mapping_from + "-swap", 'w') as f:
                    f.writelines(mapping_to)
                    f.close()

    if 'sort' in config.keys():
        # Alphabetically sort the mapping file into "<mapping_from>-sort".
        mapping_from = []
        if os.path.isfile(args.mapping_from):
            with open(args.mapping_from) as f:
                mapping_from = f.readlines()
                f.close()
        mapping_from.sort()
        with open(args.mapping_from + "-sort", 'w') as f:
            f.writelines(mapping_from)
            f.close()

    if 'verify-data' in config.keys() or 'verify-image' in config.keys():
        # Open every training image (multi-threaded) to detect corrupt files.
        if 'verify-image' in config.keys():
            lists = args.verbose_list
        else:
            with open(os.path.join(args.root, 'train.txt')) as f:
                lists = f.readlines()
                f.close()
        from PIL import Image
        from threading import Thread
        print("going to check %d files" % len(lists))

        def check(lists, start, end, index):
            # Worker: verify images in lists[start:end]; thread 0 reports progress.
            for i, item in enumerate(lists[start:end]):
                try:
                    items = item.split()
                    if len(items) >= 1:
                        path = items[0].strip().strip('\n')
                    else:
                        print("skip line %s" % i)
                        continue
                    path = os.path.join(args.root, os.path.join("train", path))
                    imgs = Image.open(path)
                    imgs.resize((256,256))
                    if index == 0:
                        print(i, end ="\r", file=sys.stderr)
                except (RuntimeError, IOError):
                    print("\nError when read image %s" % path)
            print("\nFinish checking", index)

        #lists = lists[45000:]
        # Fan out over up to 20 worker threads. They are never joined; the
        # process exits once all (non-daemon) workers finish.
        num = min(len(lists), 20)
        for i in range(num):
            start = len(lists) // num * i
            end = min(start + len(lists) // num, len(lists))
            th = Thread(target=check, args=(lists, start, end, i))
            th.start()


if __name__ == '__main__':
    main()
| tools.py | 11,829 | force cpu mode force cpu mode force cpu modelists = lists[45000:] | 65 | en | 0.697406 |
from selenium import webdriver
from openpyxl import load_workbook
import re
import datetime

# Scrape the top-20 countries from the worldometers coronavirus table and
# write each country's day-over-day case growth (in percent) into an Excel
# workbook named corona<ddMonYY>.xlsx, based on the template corona.xlsx.

driver = webdriver.Firefox()
driver.get("https://www.worldometers.info/coronavirus/")

countries = []
cases = []
newCases = []
casesInt = []
newCasesInt = []

nowDate = datetime.datetime.now().strftime("%d%b%y")

# Table rows 2..21 hold the top-20 countries (row 1 is the header/world row).
# Column 1 = country name, column 2 = total cases, column 3 = new cases.
for row in range(2, 22):
    countries.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(row) + "]/td[1]").text)
    cases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(row) + "]/td[2]").text)
    newCases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(row) + "]/td[3]").text)

# BUGFIX: close the browser once scraping is done (it was previously left running).
driver.quit()

# Strip thousands separators / '+' signs and convert to integers.
for case in cases:
    casesInt.append(int(re.sub(r'\D', '', case)))
for newCase in newCases:
    # An empty cell means no new cases were reported; keep the original
    # fallback of 1 so the ratio below stays meaningful.
    newCasesInt.append(int(re.sub(r'\D', '', newCase)) if newCase else 1)

# Growth = new cases relative to yesterday's total, in percent.
percentages = []
for caseInt, newCase in zip(casesInt, newCasesInt):
    previous = caseInt - newCase
    # BUGFIX: guard against division by zero when every case is new.
    percentage = round((newCase / previous) * 100, 2) if previous else 0.0
    percentages.append(percentage)

# Fill countries (column A) and growth percentages (column B) from row 2 down.
wb = load_workbook(filename='corona.xlsx')
ws = wb.active
for i in range(20):
    ws['A' + str(i + 2)] = countries[i]
    ws['B' + str(i + 2)] = percentages[i]
wb.save(filename="corona" + nowDate + ".xlsx")
# Demo for a 2.13" e-paper display driven by an ESP32 running MicroPython:
# draws a bordered "MicroPython!" banner with a full refresh, then a set of
# concentric rectangles using a partial refresh.
import epaper2in13
from machine import Pin, SPI
from time import sleep_ms

# SPI #2 on ESP32
spi = SPI(2, baudrate=2000000, polarity=0, phase=0)  # miso=Pin(12), mosi=Pin(23), sck=Pin(18))
cs = Pin(5)    # chip select
dc = Pin(2)    # data/command select
rst = Pin(15)  # panel reset
busy = Pin(4)  # panel busy flag

e = epaper2in13.EPD(spi, cs, dc, rst, busy)
e.init(e.FULL_UPDATE)

y_start = 6  # Y addresses start at 6 due to the memory layout

import framebuf
buf = bytearray(e.width * e.height // 8)
fb = framebuf.FrameBuffer(buf, e.height, e.width, framebuf.MONO_VLSB)

# -------------------- full update: bordered greeting --------------------
fb.fill(0)
fb.text('MicroPython!', 2, y_start + 2, 0xffff)
fb.rect(0, y_start, 250, 122, 0xffff)
e.set_frame_memory(buf, 0, 0, e.width, e.height)
e.display_frame()

sleep_ms(2000)  # wait for 2 seconds before doing a partial update

# -------------------- partial update: concentric rectangles --------------------
e.init(e.PART_UPDATE)
fb = framebuf.FrameBuffer(buf, 200, 32, framebuf.MONO_VLSB)
fb.fill(0x0)
# BUGFIX: use floor division — 32/2-1 is the float 15.0, and range() rejects
# floats with a TypeError on Python 3 / modern MicroPython.
for i in range(0, 32 // 2 - 1, 2):
    fb.rect(i, i, 200 - i * 2, 32 - i * 2, 0xffff)
e.set_frame_memory(buf, 8, 32, 32, 200)  # 8px from bottom, 25px from left
e.display_frame()
import datetime
import os
import typing
from dataclasses import dataclass
import pandas
import pytest
from dataclasses_json import dataclass_json
import flytekit
from flytekit import ContainerTask, SQLTask, dynamic, kwtypes, maptask
from flytekit.common.translator import get_serializable
from flytekit.core import context_manager, launch_plan, promise
from flytekit.core.condition import conditional
from flytekit.core.context_manager import ExecutionState, Image, ImageConfig
from flytekit.core.node import Node
from flytekit.core.promise import NodeOutput, Promise, VoidPromise
from flytekit.core.resources import Resources
from flytekit.core.task import TaskMetadata, task
from flytekit.core.testing import patch, task_mock
from flytekit.core.type_engine import RestrictedTypeError, TypeEngine
from flytekit.core.workflow import workflow
from flytekit.interfaces.data.data_proxy import FileAccessProvider
from flytekit.models.core import types as _core_types
from flytekit.models.interface import Parameter
from flytekit.models.task import Resources as _resource_models
from flytekit.models.types import LiteralType
from flytekit.types.schema import FlyteSchema, SchemaOpenMode
def test_default_wf_params_works():
    """Tasks executed locally see the synthetic local execution id."""
    @task
    def my_task(a: int):
        wf_params = flytekit.current_context()
        assert wf_params.execution_id == "ex:local:local:local"

    my_task(a=3)


def test_simple_input_output():
    """A NamedTuple-returning task yields a plain tuple when run locally."""
    @task
    def my_task(a: int) -> typing.NamedTuple("OutputsBC", b=int, c=str):
        ctx = flytekit.current_context()
        assert ctx.execution_id == "ex:local:local:local"
        return a + 2, "hello world"

    assert my_task(a=3) == (5, "hello world")


def test_simple_input_no_output():
    """A no-output task returns None locally and a VoidPromise when compiled."""
    @task
    def my_task(a: int):
        pass

    assert my_task(a=3) is None

    ctx = context_manager.FlyteContext.current_context()
    with ctx.new_compilation_context() as ctx:
        outputs = my_task(a=3)
        assert isinstance(outputs, VoidPromise)


def test_single_output():
    """Compiling a single-output task creates one node and an unready promise."""
    @task
    def my_task() -> str:
        return "Hello world"

    assert my_task() == "Hello world"

    ctx = context_manager.FlyteContext.current_context()
    with ctx.new_compilation_context() as ctx:
        outputs = my_task()
        assert ctx.compilation_state is not None
        nodes = ctx.compilation_state.nodes
        assert len(nodes) == 1
        assert outputs.is_ready is False
        assert outputs.ref.node is nodes[0]
def test_engine_file_output():
    """Round-trips a local file through the blob TypeEngine, using local
    storage to stand in for remote storage."""
    basic_blob_type = _core_types.BlobType(format="", dimensionality=_core_types.BlobType.BlobDimensionality.SINGLE,)
    fs = FileAccessProvider(local_sandbox_dir="/tmp/flytetesting")
    with context_manager.FlyteContext.current_context().new_file_access_context(file_access_provider=fs) as ctx:
        # Write some text to a file not in that directory above
        test_file_location = "/tmp/sample.txt"
        with open(test_file_location, "w") as fh:
            fh.write("Hello World\n")

        lit = TypeEngine.to_literal(ctx, test_file_location, os.PathLike, LiteralType(blob=basic_blob_type))

        # Since we're using local as remote, we should be able to just read the file from the 'remote' location.
        with open(lit.scalar.blob.uri, "r") as fh:
            assert fh.readline() == "Hello World\n"

        # We should also be able to turn the thing back into regular python native thing.
        redownloaded_local_file_location = TypeEngine.to_python_value(ctx, lit, os.PathLike)
        with open(redownloaded_local_file_location, "r") as fh:
            assert fh.readline() == "Hello World\n"
def test_wf1():
    """Checks the compiled structure of a two-task workflow: node ids,
    upstream links, output bindings, and NamedTuple interface naming."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        d = t2(a=y, b=b)
        return x, d

    assert len(my_wf._nodes) == 2
    assert my_wf._nodes[0].id == "n0"
    assert my_wf._nodes[1]._upstream_nodes[0] is my_wf._nodes[0]

    assert len(my_wf._output_bindings) == 2
    assert my_wf._output_bindings[0].var == "o0"
    assert my_wf._output_bindings[0].binding.promise.var == "t1_int_output"

    # A single-field NamedTuple return keeps its declared tuple name.
    nt = typing.NamedTuple("SingleNT", t1_int_output=float)

    @task
    def t3(a: int) -> nt:
        return (a + 2,)

    assert t3.python_interface.output_tuple_name == "SingleNT"
    assert t3.interface.outputs["t1_int_output"] is not None


def test_wf1_run():
    """Locally executes the two-task workflow, both by tuple unpacking and by
    attribute access on the returned NamedTuple promise."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        d = t2(a=y, b=b)
        return x, d

    x = my_wf(a=5, b="hello ")
    assert x == (7, "hello world")

    @workflow
    def my_wf2(a: int, b: str) -> (int, str):
        tup = t1(a=a)
        d = t2(a=tup.c, b=b)
        return tup.t1_int_output, d

    x = my_wf2(a=5, b="hello ")
    assert x == (7, "hello world")
def test_wf1_with_overrides():
    """Node-level with_overrides() calls must not change local execution results."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a).with_overrides(name="x")
        d = t2(a=y, b=b).with_overrides()
        return x, d

    x = my_wf(a=5, b="hello ")
    assert x == (7, "hello world")


def test_wf1_with_list_of_inputs():
    """A list argument may mix workflow inputs and upstream task outputs."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: typing.List[str]) -> str:
        return " ".join(a)

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        xx, yy = t1(a=a)
        d = t2(a=[b, yy])
        return xx, d

    x = my_wf(a=5, b="hello")
    assert x == (7, "hello world")

    @workflow
    def my_wf2(a: int, b: str) -> int:
        x, y = t1(a=a)
        t2(a=[b, y])
        return x

    x = my_wf2(a=5, b="hello")
    assert x == 7


def test_wf_output_mismatch():
    """Workflows whose returns don't match the declared outputs fail at
    definition time; a 1-tuple return for a single output is accepted."""
    with pytest.raises(AssertionError):

        @workflow
        def my_wf(a: int, b: str) -> (int, str):
            return a

    with pytest.raises(AssertionError):

        @workflow
        def my_wf2(a: int, b: str) -> int:
            return a, b

    @workflow
    def my_wf3(a: int, b: str) -> int:
        return (a,)

    my_wf3(a=10, b="hello")
def test_promise_return():
    """
    Testing that when a workflow is local executed but a local wf execution context already exists, Promise objects
    are returned wrapping Flyte literals instead of the unpacked dict.
    """
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        a = a + 2
        return a, "world-" + str(a)

    @workflow
    def mimic_sub_wf(a: int) -> (str, str):
        x, y = t1(a=a)
        u, v = t1(a=x)
        return y, v

    ctx = context_manager.FlyteContext.current_context()

    with ctx.new_execution_context(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION) as ctx:
        a, b = mimic_sub_wf(a=3)

        assert isinstance(a, promise.Promise)
        assert isinstance(b, promise.Promise)
        assert a.val.scalar.value.string_value == "world-5"
        assert b.val.scalar.value.string_value == "world-7"


def test_wf1_with_sql():
    """A SQLTask node can be mocked with task_mock inside a workflow."""
    sql = SQLTask(
        "my-query",
        query_template="SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10",
        inputs=kwtypes(ds=datetime.datetime),
        outputs=kwtypes(results=FlyteSchema),
        metadata=TaskMetadata(retries=2),
    )

    @task
    def t1() -> datetime.datetime:
        return datetime.datetime.now()

    @workflow
    def my_wf() -> FlyteSchema:
        dt = t1()
        return sql(ds=dt)

    with task_mock(sql) as mock:
        mock.return_value = pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})
        assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()


def test_wf1_with_sql_with_patch():
    """Same as test_wf1_with_sql, but using the @patch decorator form."""
    sql = SQLTask(
        "my-query",
        query_template="SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10",
        inputs=kwtypes(ds=datetime.datetime),
        outputs=kwtypes(results=FlyteSchema),
        metadata=TaskMetadata(retries=2),
    )

    @task
    def t1() -> datetime.datetime:
        return datetime.datetime.now()

    @workflow
    def my_wf() -> FlyteSchema:
        dt = t1()
        return sql(ds=dt)

    @patch(sql)
    def test_user_demo_test(mock_sql):
        mock_sql.return_value = pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})
        assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()

    # Have to call because tests inside tests don't run
    test_user_demo_test()
def test_wf1_with_map():
    """maptask fans a task out over list inputs and gathers list outputs."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        a = a + 2
        return a, "world-" + str(a)

    @task
    def t2(a: typing.List[int], b: typing.List[str]) -> (int, str):
        ra = 0
        for x in a:
            ra += x
        rb = ""
        for x in b:
            rb += x
        return ra, rb

    @workflow
    def my_wf(a: typing.List[int]) -> (int, str):
        x, y = maptask(t1, metadata=TaskMetadata(retries=1))(a=a)
        return t2(a=x, b=y)

    x = my_wf(a=[5, 6])
    assert x == (15, "world-7world-8")


def test_wf1_compile_time_constant_vars():
    """Constant (non-promise) arguments can be mixed into task calls."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        d = t2(a="This is my way", b=b)
        return x, d

    x = my_wf(a=5, b="hello ")
    assert x == (7, "hello This is my way")


def test_wf1_with_constant_return():
    """Workflows may return literal constants alongside (or instead of) promises."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        t2(a="This is my way", b=b)
        return x, "A constant output"

    x = my_wf(a=5, b="hello ")
    assert x == (7, "A constant output")

    @workflow
    def my_wf2(a: int, b: str) -> int:
        t1(a=a)
        t2(a="This is my way", b=b)
        return 10

    assert my_wf2(a=5, b="hello ") == 10
def test_wf1_with_dynamic():
    """A @dynamic task runs like Python locally and compiles into a dynamic
    job spec (one node per loop iteration) under task-execution mode."""
    @task
    def t1(a: int) -> str:
        a = a + 2
        return "world-" + str(a)

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @dynamic
    def my_subwf(a: int) -> typing.List[str]:
        s = []
        for i in range(a):
            s.append(t1(a=i))
        return s

    @workflow
    def my_wf(a: int, b: str) -> (str, typing.List[str]):
        x = t2(a=b, b=b)
        v = my_subwf(a=a)
        return x, v

    v = 5
    x = my_wf(a=v, b="hello ")
    assert x == ("hello hello ", ["world-" + str(i) for i in range(2, v + 2)])

    with context_manager.FlyteContext.current_context().new_serialization_settings(
        serialization_settings=context_manager.SerializationSettings(
            project="test_proj",
            domain="test_domain",
            version="abc",
            image_config=ImageConfig(Image(name="name", fqn="image", tag="name")),
            env={},
        )
    ) as ctx:
        with ctx.new_execution_context(mode=ExecutionState.Mode.TASK_EXECUTION) as ctx:
            dynamic_job_spec = my_subwf.compile_into_workflow(ctx, my_subwf._task_function, a=5)
            assert len(dynamic_job_spec._nodes) == 5


def test_list_output():
    """A workflow may assemble its list output from several task calls."""
    @task
    def t1(a: int) -> str:
        a = a + 2
        return "world-" + str(a)

    @workflow
    def lister() -> typing.List[str]:
        s = []
        # FYI: For users who happen to look at this, keep in mind this is only run once at compile time.
        for i in range(10):
            s.append(t1(a=i))
        return s

    assert len(lister.interface.outputs) == 1
    binding_data = lister._output_bindings[0].binding  # the property should be named binding_data
    assert binding_data.collection is not None
    assert len(binding_data.collection.bindings) == 10
def test_comparison_refs():
    """Comparison operators on node-output promises build comparison
    expressions (printed here; no evaluation is possible on refs)."""
    def dummy_node(node_id) -> Node:
        n = Node(
            node_id,
            metadata=None,
            bindings=[],
            upstream_nodes=[],
            flyte_entity=SQLTask(name="x", query_template="x", inputs={}),
        )
        n._id = node_id
        return n

    px = Promise("x", NodeOutput(var="x", node=dummy_node("n1")))
    py = Promise("y", NodeOutput(var="y", node=dummy_node("n2")))

    def print_expr(expr):
        print(f"{expr} is type {type(expr)}")

    print_expr(px == py)
    print_expr(px < py)
    print_expr((px == py) & (px < py))
    print_expr(((px == py) & (px < py)) | (px > py))
    print_expr(px < 5)
    print_expr(px >= 5)


def test_comparison_lits():
    """Comparison expressions over literal-backed promises evaluate eagerly."""
    px = Promise("x", TypeEngine.to_literal(None, 5, int, None))
    py = Promise("y", TypeEngine.to_literal(None, 8, int, None))

    def eval_expr(expr, expected: bool):
        print(f"{expr} evals to {expr.eval()}")
        assert expected == expr.eval()

    eval_expr(px == py, False)
    eval_expr(px < py, True)
    eval_expr((px == py) & (px < py), False)
    eval_expr(((px == py) & (px < py)) | (px > py), False)
    eval_expr(px < 5, False)
    eval_expr(px >= 5, True)
    eval_expr(py >= 5, True)
def test_wf1_branches():
    """Chained conditionals select the matching branch during local execution."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str) -> str:
        return a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        d = (
            conditional("test1")
            .if_(x == 4)
            .then(t2(a=b))
            .elif_(x >= 5)
            .then(t2(a=y))
            .else_()
            .fail("Unable to choose branch")
        )
        f = conditional("test2").if_(d == "hello ").then(t2(a="It is hello")).else_().then(t2(a="Not Hello!"))
        return x, f

    x = my_wf(a=5, b="hello ")
    assert x == (7, "Not Hello!")

    x = my_wf(a=2, b="hello ")
    assert x == (4, "It is hello")


def test_wf1_branches_no_else():
    """A conditional without an else_() clause is rejected at definition time."""
    with pytest.raises(NotImplementedError):

        def foo():
            @task
            def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
                return a + 2, "world"

            @task
            def t2(a: str) -> str:
                return a

            @workflow
            def my_wf(a: int, b: str) -> (int, str):
                x, y = t1(a=a)
                d = conditional("test1").if_(x == 4).then(t2(a=b)).elif_(x >= 5).then(t2(a=y))
                conditional("test2").if_(x == 4).then(t2(a=b)).elif_(x >= 5).then(t2(a=y)).else_().fail("blah")
                return x, d

        foo()


def test_wf1_branches_failing():
    """Falling through to else_().fail(...) raises during local execution."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str) -> str:
        return a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        d = (
            conditional("test1")
            .if_(x == 4)
            .then(t2(a=b))
            .elif_(x >= 5)
            .then(t2(a=y))
            .else_()
            .fail("All Branches failed")
        )
        return x, d

    with pytest.raises(ValueError):
        my_wf(a=1, b="hello ")


def test_cant_use_normal_tuples():
    """Bare tuple return annotations are a restricted type for tasks."""
    with pytest.raises(RestrictedTypeError):

        @task
        def t1(a: str) -> tuple:
            return (a, 3)
def test_wf1_df():
    """pandas DataFrames flow between tasks and out of a workflow locally."""
    @task
    def t1(a: int) -> pandas.DataFrame:
        return pandas.DataFrame(data={"col1": [a, 2], "col2": [a, 4]})

    @task
    def t2(df: pandas.DataFrame) -> pandas.DataFrame:
        return df.append(pandas.DataFrame(data={"col1": [5, 10], "col2": [5, 10]}))

    @workflow
    def my_wf(a: int) -> pandas.DataFrame:
        df = t1(a=a)
        return t2(df=df)

    x = my_wf(a=20)
    assert isinstance(x, pandas.DataFrame)
    result_df = x.reset_index(drop=True) == pandas.DataFrame(
        data={"col1": [20, 2, 5, 10], "col2": [20, 4, 5, 10]}
    ).reset_index(drop=True)
    assert result_df.all().all()


def test_lp_serialize():
    """Launch plans serialize with defaults only when defaults were given, and
    a defaulted parameter must not also be marked required (protobuf oneof)."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        a = a + 2
        return a, "world-" + str(a)

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_subwf(a: int) -> (str, str):
        x, y = t1(a=a)
        u, v = t1(a=x)
        return y, v

    lp = launch_plan.LaunchPlan.create("serialize_test1", my_subwf)
    lp_with_defaults = launch_plan.LaunchPlan.create("serialize_test2", my_subwf, default_inputs={"a": 3})

    serialization_settings = context_manager.SerializationSettings(
        project="proj",
        domain="dom",
        version="123",
        image_config=ImageConfig(Image(name="name", fqn="asdf/fdsa", tag="123")),
        env={},
    )
    sdk_lp = get_serializable(serialization_settings, lp)
    assert len(sdk_lp.default_inputs.parameters) == 0
    assert len(sdk_lp.fixed_inputs.literals) == 0

    sdk_lp = get_serializable(serialization_settings, lp_with_defaults)
    assert len(sdk_lp.default_inputs.parameters) == 1
    assert len(sdk_lp.fixed_inputs.literals) == 0

    # Adding a check to make sure oneof is respected. Tricky with booleans... if a default is specified, the
    # required field needs to be None, not False.
    parameter_a = sdk_lp.default_inputs.parameters["a"]
    parameter_a = Parameter.from_flyte_idl(parameter_a.to_flyte_idl())
    assert parameter_a.default is not None
def test_wf_container_task():
    """A raw ContainerTask can be wired after a Python task and mocked locally."""
    @task
    def t1(a: int) -> (int, str):
        return a + 2, str(a) + "-HELLO"

    t2 = ContainerTask(
        "raw",
        image="alpine",
        inputs=kwtypes(a=int, b=str),
        input_data_dir="/tmp",
        output_data_dir="/tmp",
        command=["cat"],
        arguments=["/tmp/a"],
    )

    def wf(a: int):
        x, y = t1(a=a)
        t2(a=x, b=y)

    with task_mock(t2) as mock:
        mock.side_effect = lambda a, b: None
        assert t2(a=10, b="hello") is None
        wf(a=10)


def test_wf_container_task_multiple():
    """Multiple ContainerTasks compose in one workflow; each mock replaces the
    shell command with a Python lambda for local execution."""
    square = ContainerTask(
        name="square",
        input_data_dir="/var/inputs",
        output_data_dir="/var/outputs",
        inputs=kwtypes(val=int),
        outputs=kwtypes(out=int),
        image="alpine",
        command=["sh", "-c", "echo $(( {{.Inputs.val}} * {{.Inputs.val}} )) | tee /var/outputs/out"],
    )

    sum = ContainerTask(
        name="sum",
        input_data_dir="/var/flyte/inputs",
        output_data_dir="/var/flyte/outputs",
        inputs=kwtypes(x=int, y=int),
        outputs=kwtypes(out=int),
        image="alpine",
        command=["sh", "-c", "echo $(( {{.Inputs.x}} + {{.Inputs.y}} )) | tee /var/flyte/outputs/out"],
    )

    @workflow
    def raw_container_wf(val1: int, val2: int) -> int:
        return sum(x=square(val=val1), y=square(val=val2))

    with task_mock(square) as square_mock, task_mock(sum) as sum_mock:
        square_mock.side_effect = lambda val: val * val
        assert square(val=10) == 100

        sum_mock.side_effect = lambda x, y: x + y
        assert sum(x=10, y=10) == 20

        assert raw_container_wf(val1=10, val2=10) == 200


def test_wf_tuple_fails():
    """Bare tuple parameter annotations are a restricted type for tasks."""
    with pytest.raises(RestrictedTypeError):

        @task
        def t1(a: tuple) -> (int, str):
            return a[0] + 2, str(a) + "-HELLO"
def test_wf_typed_schema():
    """Typed FlyteSchema objects flow between tasks; columns can be dropped
    and the narrowed schema is preserved through a workflow."""
    schema1 = FlyteSchema[kwtypes(x=int, y=str)]

    @task
    def t1() -> schema1:
        s = schema1()
        s.open().write(pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}))
        return s

    @task
    def t2(s: FlyteSchema[kwtypes(x=int, y=str)]) -> FlyteSchema[kwtypes(x=int)]:
        df = s.open().all()
        return df[s.column_names()[:-1]]

    @workflow
    def wf() -> FlyteSchema[kwtypes(x=int)]:
        return t2(s=t1())

    w = t1()
    assert w is not None
    df = w.open(override_mode=SchemaOpenMode.READ).all()
    result_df = df.reset_index(drop=True) == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}).reset_index(
        drop=True
    )
    assert result_df.all().all()

    df = t2(s=w.as_readonly())
    assert df is not None
    result_df = df.reset_index(drop=True) == pandas.DataFrame(data={"x": [1, 2]}).reset_index(drop=True)
    assert result_df.all().all()

    x = wf()
    df = x.open().all()
    result_df = df.reset_index(drop=True) == pandas.DataFrame(data={"x": [1, 2]}).reset_index(drop=True)
    assert result_df.all().all()


def test_wf_schema_to_df():
    """A FlyteSchema task output coerces into a pandas.DataFrame parameter."""
    schema1 = FlyteSchema[kwtypes(x=int, y=str)]

    @task
    def t1() -> schema1:
        s = schema1()
        s.open().write(pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}))
        return s

    @task
    def t2(df: pandas.DataFrame) -> int:
        return len(df.columns.values)

    @workflow
    def wf() -> int:
        return t2(df=t1())

    x = wf()
    assert x == 2
def test_dict_wf_with_constants():
    """A dict input may mix a workflow input with an upstream task output."""

    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: typing.Dict[str, str]) -> str:
        return " ".join(a.values())

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        incremented, word = t1(a=a)
        joined = t2(a={"key1": b, "key2": word})
        return incremented, joined

    assert my_wf(a=5, b="hello") == (7, "hello world")
def test_dict_wf_with_conversion():
    """A typed Dict output cannot be converted into an untyped ``dict`` parameter."""

    @task
    def t1(a: int) -> typing.Dict[str, str]:
        return {"a": str(a)}

    @task
    def t2(a: dict) -> str:
        # Never reached locally: binding the typed output to ``dict`` fails first.
        print(f"HAHAH {a}")
        return " ".join(a.values())

    @workflow
    def my_wf(a: int) -> str:
        return t2(a=t1(a=a))

    with pytest.raises(TypeError):
        my_wf(a=5)
def test_wf_with_empty_dict():
    """An empty dict flows from one task to another unchanged."""

    @task
    def t1() -> typing.Dict:
        return {}

    @task
    def t2(d: typing.Dict):
        assert d == {}

    @workflow
    def wf():
        t2(d=t1())

    wf()
def test_wf_with_catching_no_return():
    """Capturing and forwarding the (void) result of a no-output task must fail."""

    @task
    def t1() -> typing.Dict:
        return {}

    @task
    def t2(d: typing.Dict):
        assert d == {}

    @task
    def t3(s: str):
        pass

    with pytest.raises(AssertionError):

        @workflow
        def wf():
            d = t1()
            # Deliberately wrong: t2 produces no output, so capturing it is invalid.
            x = t2(d=d)
            # Equally wrong: forwarding the void capture to another task.
            t3(s=x)

        wf()
def test_wf_custom_types_missing_dataclass_json():
    """A plain dataclass without @dataclass_json is rejected as a task output type."""
    with pytest.raises(AssertionError):

        @dataclass
        class MyCustomType(object):
            pass

        @task
        def t1(a: int) -> MyCustomType:
            return MyCustomType()
def test_wf_custom_types():
    """@dataclass_json dataclasses pass between tasks as structured values."""

    @dataclass_json
    @dataclass
    class MyCustomType(object):
        x: int
        y: str

    @task
    def t1(a: int) -> MyCustomType:
        return MyCustomType(x=a, y="t1")

    @task
    def t2(a: MyCustomType, b: str) -> (MyCustomType, int):
        return MyCustomType(x=a.x, y=f"{a.y} {b}"), 5

    @workflow
    def my_wf(a: int, b: str) -> (MyCustomType, int):
        return t2(a=t1(a=a), b=b)

    custom, num = my_wf(a=10, b="hello")
    assert num == 5
    assert (custom.x, custom.y) == (10, "t1 hello")
def test_arbit_class():
    """An arbitrary class with no registered transformer cannot be a task output."""

    class Foo(object):
        pass

    with pytest.raises(ValueError):

        @task
        def t1(a: int) -> Foo:
            return Foo()
def test_dataclass_more():
    """A dataclass holding a nested container (Dict field) serializes correctly."""

    @dataclass_json
    @dataclass
    class Datum(object):
        x: int
        y: str
        z: typing.Dict[int, str]

    @task
    def stringify(x: int) -> Datum:
        return Datum(x=x, y=str(x), z={x: str(x)})

    @task
    def add(x: Datum, y: Datum) -> Datum:
        # Merge y's mapping into x's in place, then combine the scalars.
        x.z.update(y.z)
        return Datum(x=x.x + y.x, y=x.y + y.y, z=x.z)

    @workflow
    def wf(x: int, y: int) -> Datum:
        return add(x=stringify(x=x), y=stringify(x=y))

    wf(x=10, y=20)
def test_environment():
    """Task-level environment variables merge with (and win over) serialization env."""

    @task(environment={"FOO": "foofoo", "BAZ": "baz"})
    def t1(a: int) -> str:
        return "now it's " + str(a + 2)

    @workflow
    def my_wf(a: int) -> str:
        return t1(a=a)

    settings = context_manager.SerializationSettings(
        project="test_proj",
        domain="test_domain",
        version="abc",
        image_config=ImageConfig(Image(name="name", fqn="image", tag="name")),
        env={"FOO": "foo", "BAR": "bar"},
    )

    with context_manager.FlyteContext.current_context().new_compilation_context():
        sdk_task = get_serializable(settings, t1)
        # FOO: task overrides settings; BAR: from settings; BAZ: task-only.
        assert sdk_task.container.env == {"FOO": "foofoo", "BAR": "bar", "BAZ": "baz"}
def test_resources():
    """Requests/limits passed to @task serialize into container resource entries."""

    @task(requests=Resources(cpu="1"), limits=Resources(cpu="2", mem="400M"))
    def t1(a: int) -> str:
        return "now it's " + str(a + 2)

    @task(requests=Resources(cpu="3"))
    def t2(a: int) -> str:
        return "now it's " + str(a + 200)

    @workflow
    def my_wf(a: int) -> str:
        return t1(a=a)

    settings = context_manager.SerializationSettings(
        project="test_proj",
        domain="test_domain",
        version="abc",
        image_config=ImageConfig(Image(name="name", fqn="image", tag="name")),
        env={},
    )

    with context_manager.FlyteContext.current_context().new_compilation_context():
        serialized_t1 = get_serializable(settings, t1)
        assert serialized_t1.container.resources.requests == [
            _resource_models.ResourceEntry(_resource_models.ResourceName.CPU, "1")
        ]
        assert serialized_t1.container.resources.limits == [
            _resource_models.ResourceEntry(_resource_models.ResourceName.CPU, "2"),
            _resource_models.ResourceEntry(_resource_models.ResourceName.MEMORY, "400M"),
        ]

        serialized_t2 = get_serializable(settings, t2)
        assert serialized_t2.container.resources.requests == [
            _resource_models.ResourceEntry(_resource_models.ResourceName.CPU, "3")
        ]
        # t2 declared no limits at all.
        assert serialized_t2.container.resources.limits == []
def test_wf_explicitly_returning_empty_task():
    """Returning a no-output task call makes _local_execute handle a VoidPromise."""

    @task
    def t1():
        ...

    @workflow
    def my_subwf():
        # Forces the workflow's _local_execute path to handle VoidPromises.
        return t1()

    assert my_subwf() is None
| tests/flytekit/unit/core/test_type_hints.py | 26,978 | Testing that when a workflow is local executed but a local wf execution context already exists, Promise objects
are returned wrapping Flyte literals instead of the unpacked dict.
Write some text to a file not in that directory above Since we're using local as remote, we should be able to just read the file from the 'remote' location. We should also be able to turn the thing back into regular python native thing. Have to call because tests inside tests don't run FYI: For users who happen to look at this, keep in mind this is only run once at compile time. the property should be named binding_data Adding a check to make sure oneof is respected. Tricky with booleans... if a default is specified, the required field needs to be None, not False. The following statement is wrong, this should not be allowed to pass to another task Passing x is wrong in this case This forces the wf _local_execute to handle VoidPromises | 925 | en | 0.939458 |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = ['CSIStorageCapacityArgs', 'CSIStorageCapacity']
@pulumi.input_type
class CSIStorageCapacityArgs:
    # Auto-generated by pulumigen: input-argument bag for the CSIStorageCapacity
    # resource. All state is stored via pulumi.set/pulumi.get rather than plain
    # attributes so the Pulumi runtime can track inputs.
    def __init__(__self__, *,
                 storage_class_name: pulumi.Input[str],
                 api_version: Optional[pulumi.Input[str]] = None,
                 capacity: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 maximum_volume_size: Optional[pulumi.Input[str]] = None,
                 metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
                 node_topology: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
        """
        The set of arguments for constructing a CSIStorageCapacity resource.
        :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
               The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
               This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
        :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
               Objects are namespaced.
               More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
        """
        pulumi.set(__self__, "storage_class_name", storage_class_name)
        if api_version is not None:
            # Generated code pins the group/version constant; any caller-supplied
            # api_version value merely triggers the assignment and is otherwise ignored.
            pulumi.set(__self__, "api_version", 'storage.k8s.io/v1beta1')
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if kind is not None:
            # Same pattern as api_version: the kind constant is pinned.
            pulumi.set(__self__, "kind", 'CSIStorageCapacity')
        if maximum_volume_size is not None:
            pulumi.set(__self__, "maximum_volume_size", maximum_volume_size)
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if node_topology is not None:
            pulumi.set(__self__, "node_topology", node_topology)

    @property
    @pulumi.getter(name="storageClassName")
    def storage_class_name(self) -> pulumi.Input[str]:
        """
        The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
        """
        return pulumi.get(self, "storage_class_name")

    @storage_class_name.setter
    def storage_class_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_class_name", value)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")

    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_version", value)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input[str]]:
        """
        Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
        The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
        """
        return pulumi.get(self, "capacity")

    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "capacity", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter(name="maximumVolumeSize")
    def maximum_volume_size(self) -> Optional[pulumi.Input[str]]:
        """
        MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
        This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
        """
        return pulumi.get(self, "maximum_volume_size")

    @maximum_volume_size.setter
    def maximum_volume_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum_volume_size", value)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
        """
        Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
        Objects are namespaced.
        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter(name="nodeTopology")
    def node_topology(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
        """
        NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
        """
        return pulumi.get(self, "node_topology")

    @node_topology.setter
    def node_topology(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
        pulumi.set(self, "node_topology", value)
class CSIStorageCapacity(pulumi.CustomResource):
    # Auto-generated by pulumigen: Pulumi resource wrapper for the Kubernetes
    # storage.k8s.io/v1beta1 CSIStorageCapacity object.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_version: Optional[pulumi.Input[str]] = None,
                 capacity: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 maximum_volume_size: Optional[pulumi.Input[str]] = None,
                 metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
                 node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None,
                 storage_class_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
        For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
        The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero
        The producer of these objects can decide which approach is more suitable.
        They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
               The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
               This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
        :param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
               Objects are namespaced.
               More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
        :param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: CSIStorageCapacityArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
        For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
        The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero
        The producer of these objects can decide which approach is more suitable.
        They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
        :param str resource_name: The name of the resource.
        :param CSIStorageCapacityArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above based on the actual arguments,
        # then funnel both paths into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(CSIStorageCapacityArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 api_version: Optional[pulumi.Input[str]] = None,
                 capacity: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 maximum_volume_size: Optional[pulumi.Input[str]] = None,
                 metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
                 node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None,
                 storage_class_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared constructor body: validates options, builds the props bag with
        # the pinned apiVersion/kind constants, and registers the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs)

            # apiVersion and kind are fixed constants for this resource type;
            # caller-supplied values are not used.
            __props__.__dict__["api_version"] = 'storage.k8s.io/v1beta1'
            __props__.__dict__["capacity"] = capacity
            __props__.__dict__["kind"] = 'CSIStorageCapacity'
            __props__.__dict__["maximum_volume_size"] = maximum_volume_size
            __props__.__dict__["metadata"] = metadata
            __props__.__dict__["node_topology"] = node_topology
            if storage_class_name is None and not opts.urn:
                raise TypeError("Missing required property 'storage_class_name'")
            __props__.__dict__["storage_class_name"] = storage_class_name
        # Alias keeps state continuity with the older v1alpha1 resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:storage.k8s.io/v1alpha1:CSIStorageCapacity")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(CSIStorageCapacity, __self__).__init__(
            'kubernetes:storage.k8s.io/v1beta1:CSIStorageCapacity',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'CSIStorageCapacity':
        """
        Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine fills actual state by id.
        __props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs)

        __props__.__dict__["api_version"] = None
        __props__.__dict__["capacity"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["maximum_volume_size"] = None
        __props__.__dict__["metadata"] = None
        __props__.__dict__["node_topology"] = None
        __props__.__dict__["storage_class_name"] = None
        return CSIStorageCapacity(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> pulumi.Output[Optional[str]]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")

    @property
    @pulumi.getter
    def capacity(self) -> pulumi.Output[Optional[str]]:
        """
        Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
        The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
        """
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter(name="maximumVolumeSize")
    def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:
        """
        MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
        This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
        """
        return pulumi.get(self, "maximum_volume_size")

    @property
    @pulumi.getter
    def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]:
        """
        Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
        Objects are namespaced.
        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        """
        return pulumi.get(self, "metadata")

    @property
    @pulumi.getter(name="nodeTopology")
    def node_topology(self) -> pulumi.Output[Optional['_meta.v1.outputs.LabelSelector']]:
        """
        NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
        """
        return pulumi.get(self, "node_topology")

    @property
    @pulumi.getter(name="storageClassName")
    def storage_class_name(self) -> pulumi.Output[str]:
        """
        The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
        """
        return pulumi.get(self, "storage_class_name")
| sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py | 23,669 | The set of arguments for constructing a CSIStorageCapacity resource.
:param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero
The producer of these objects can decide which approach is more suitable.
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
:param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
:param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero
The producer of these objects can decide which approach is more suitable.
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
:param str resource_name: The name of the resource.
:param CSIStorageCapacityArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
coding=utf-8 *** WARNING: this file was generated by pulumigen. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 13,506 | en | 0.828207 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-15 15:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``Comment`` model (and its database table)."""

    dependencies = [
        ('ContestAnalyzerOnline', '0006_auto_20171015_1445'),
    ]

    operations = [
        # Removes the Comment model; Django drops the backing table on migrate.
        migrations.DeleteModel(
            name='Comment',
        ),
    ]
| ContestAnalyzerOnline/utils/migrations/0007_delete_comment.py | 375 | -*- coding: utf-8 -*- Generated by Django 1.10.2 on 2017-10-15 15:04 | 68 | en | 0.646813 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import functools
import json
import logging
import struct

from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST

from desktop.auth.backend import is_admin
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from desktop.models import Document2
from libsentry.privilege_checker import MissingSentryPrivilegeException
from notebook.api import _get_statement
from notebook.models import Notebook

from metadata.conf import OPTIMIZER
from metadata.optimizer_client import OptimizerApi, NavOptException, _get_table_name, _clean_query
LOG = logging.getLogger(__name__)
# Hive support is optional: fall back gracefully when the beeswax/metastore
# apps are not deployed. 'except ... as e' replaces the Python-2-only comma
# form (valid on Python 2.6+ and required on Python 3).
try:
  from beeswax.api import get_table_stats
  from beeswax.design import hql_query
  from metastore.views import _get_db
except ImportError as e:
  LOG.warn("Hive lib not enabled")
def error_handler(view_fn):
  """Decorate an optimizer API view so expected failures become JSON errors.

  ``Http404`` is re-raised untouched. Known optimizer/Sentry errors and any
  unexpected exception are logged and converted into a
  ``{'status': -1, 'message': ...}`` JSON response with HTTP 500.

  Fixes: uses ``except ... as e`` (the comma form is Python-2-only) and adds
  ``functools.wraps`` so wrapped views keep their name/docs for URL
  resolution and logging.
  """
  @functools.wraps(view_fn)
  def decorator(*args, **kwargs):
    try:
      return view_fn(*args, **kwargs)
    except Http404 as e:
      raise e
    except NavOptException as e:
      LOG.exception(e)
      response = {
        'status': -1,
        'message': e.message
      }
    except MissingSentryPrivilegeException as e:
      LOG.exception(e)
      response = {
        'status': -1,
        'message': 'Missing privileges for %s' % force_unicode(str(e))
      }
    except Exception as e:
      LOG.exception(e)
      response = {
        'status': -1,
        'message': force_unicode(str(e))
      }
    return JsonResponse(response, status=500)
  return decorator
@require_POST
@error_handler
def get_tenant(request):
  """Return the optimizer tenant for the requested cluster as JSON."""
  cluster_id = request.POST.get('cluster_id')

  info = OptimizerApi(request.user).get_tenant(cluster_id=cluster_id)

  if info:
    payload = {'status': 0, 'data': info['tenant']}
  else:
    payload = {'status': -1, 'message': 'Optimizer: %s' % info['details']}

  return JsonResponse(payload)
@require_POST
@error_handler
def top_tables(request):
  """List the most popular tables of a database, formatted for the catalog UI."""
  db_name = request.POST.get('database', 'default')
  page_size = request.POST.get('len', 1000)

  raw = OptimizerApi(user=request.user).top_tables(database_name=db_name, page_size=page_size)

  def _format(entry):
    # Split the qualified name once and reuse both parts.
    name_parts = _get_table_name(entry['name'])
    return {
      'eid': entry['eid'],
      'database': name_parts['database'],
      'name': name_parts['table'],
      'popularity': entry['workloadPercent'],
      'column_count': entry['columnCount'],
      'patternCount': entry['patternCount'],
      'total': entry['total'],
      'is_fact': entry['type'] != 'Dimension'
    }

  return JsonResponse({
    'status': 0,
    'top_tables': [_format(entry) for entry in raw['results']]
  })
@require_POST
@error_handler
def table_details(request):
  """Fetch the optimizer details of a single table."""
  details = OptimizerApi(request.user).table_details(
    database_name=request.POST.get('databaseName'),
    table_name=request.POST.get('tableName')
  )

  if details:
    payload = {'status': 0, 'details': details}
  else:
    payload = {'status': -1, 'message': 'Optimizer: %s' % details['details']}

  return JsonResponse(payload)
@require_POST
@error_handler
def query_compatibility(request):
  """Check how compatible a query is between two SQL dialects."""
  report = OptimizerApi(request.user).query_compatibility(
    source_platform=request.POST.get('sourcePlatform'),
    target_platform=request.POST.get('targetPlatform'),
    query=request.POST.get('query')
  )

  if report:
    payload = {'status': 0, 'query_compatibility': report}
  else:
    payload = {'status': -1, 'message': 'Optimizer: %s' % report}

  return JsonResponse(payload)
@require_POST
@error_handler
def query_risk(request):
  """Evaluate the risk of a query for the given platform and database."""
  risk = OptimizerApi(request.user).query_risk(
    query=json.loads(request.POST.get('query')),
    source_platform=request.POST.get('sourcePlatform'),
    db_name=request.POST.get('dbName')
  )

  if risk:
    payload = {'status': 0, 'query_risk': risk}
  else:
    payload = {'status': -1, 'message': 'Optimizer: %s' % risk}

  return JsonResponse(payload)
@require_POST
@error_handler
def similar_queries(request):
  """Find queries similar to the submitted one."""
  matches = OptimizerApi(request.user).similar_queries(
    source_platform=request.POST.get('sourcePlatform'),
    query=json.loads(request.POST.get('query'))
  )

  if matches:
    payload = {'status': 0, 'similar_queries': matches}
  else:
    payload = {'status': -1, 'message': 'Optimizer: %s' % matches}

  return JsonResponse(payload)
@require_POST
@error_handler
def top_filters(request):
  """Return the most common filter predicates for the given tables.

  Fix: the ``'[]'`` fallback was previously passed to ``json.loads`` as its
  second positional argument (``encoding``) instead of being the default for
  the missing ``dbTables`` POST parameter, so a request without it crashed
  on ``json.loads(None)``.
  """
  response = {'status': -1}

  db_tables = json.loads(request.POST.get('dbTables', '[]'))
  column_name = request.POST.get('columnName')  # Unused

  api = OptimizerApi(request.user)
  data = api.top_filters(db_tables=db_tables)

  if data:
    response['status'] = 0
    response['values'] = data['results']
  else:
    response['message'] = 'Optimizer: %s' % data

  return JsonResponse(response)
@require_POST
@error_handler
def top_joins(request):
  """Return the most common joins for the given tables.

  Fix: the ``'[]'`` fallback now defaults the missing ``dbTables`` POST
  parameter instead of being passed to ``json.loads`` as its second
  positional (``encoding``) argument.
  """
  response = {'status': -1}

  db_tables = json.loads(request.POST.get('dbTables', '[]'))

  api = OptimizerApi(request.user)
  data = api.top_joins(db_tables=db_tables)

  if data:
    response['status'] = 0
    response['values'] = data['results']
  else:
    response['message'] = 'Optimizer: %s' % data

  return JsonResponse(response)
@require_POST
@error_handler
def top_aggs(request):
  """Return the most common aggregate expressions for the given tables.

  Fix: the ``'[]'`` fallback now defaults the missing ``dbTables`` POST
  parameter instead of being passed to ``json.loads`` as its second
  positional (``encoding``) argument.
  """
  response = {'status': -1}

  db_tables = json.loads(request.POST.get('dbTables', '[]'))

  api = OptimizerApi(request.user)
  data = api.top_aggs(db_tables=db_tables)

  if data:
    response['status'] = 0
    response['values'] = data['results']
  else:
    response['message'] = 'Optimizer: %s' % data

  return JsonResponse(response)
@require_POST
@error_handler
def top_databases(request):
  """Return the most heavily used databases."""
  data = OptimizerApi(request.user).top_databases()

  if data:
    payload = {'status': 0, 'values': data['results']}
  else:
    payload = {'status': -1, 'message': 'Optimizer: %s' % data}

  return JsonResponse(payload)
@require_POST
@error_handler
def top_columns(request):
  """Return the most used columns for the given tables.

  Fix: the ``'[]'`` fallback now defaults the missing ``dbTables`` POST
  parameter instead of being passed to ``json.loads`` as its second
  positional (``encoding``) argument.
  """
  response = {'status': -1}

  db_tables = json.loads(request.POST.get('dbTables', '[]'))

  api = OptimizerApi(request.user)
  data = api.top_columns(db_tables=db_tables)

  if data:
    response['status'] = 0
    response['values'] = data  # Note: unlike the other top_* views this returns the payload as-is.
  else:
    response['message'] = 'Optimizer: %s' % data

  return JsonResponse(response)
def _convert_queries(queries_data):
  """Convert notebook query documents into optimizer upload tuples.

  Each entry becomes ``(query_id, execution_time, statement, database)``.
  Failed queries (no ``guid`` in the result handle) are skipped silently;
  malformed documents are skipped with a warning.

  Fix: ``except ... as e`` replaces the Python-2-only comma form.
  """
  queries = []

  for query_data in queries_data:
    try:
      snippet = query_data['snippets'][0]
      if 'guid' in snippet['result']['handle']:  # Not failed query
        # unpack_guid uses '%016x:%016x' while the optimizer api uses '%s:%s'.
        original_query_id = '%s:%s' % struct.unpack(b"QQ", base64.decodestring(snippet['result']['handle']['guid']))
        execution_time = snippet['result']['executionTime'] * 100 if snippet['status'] in ('available', 'expired') else -1
        statement = _clean_query(_get_statement(query_data))
        queries.append((original_query_id, execution_time, statement, snippet.get('database', 'default').strip()))
    except Exception as e:
      LOG.warning('Skipping upload of %s: %s' % (query_data['uuid'], e))

  return queries
@require_POST
@error_handler
def upload_history(request):
  """Upload recent query history to the optimizer (admin only).

  Fix: ``min()`` was previously called with a single argument (the raw POST
  string, or the integer limit when the parameter was absent), which raises
  TypeError; the intent is to cap the requested count at the configured
  upload limit.
  """
  response = {'status': -1}

  if is_admin(request.user):
    api = OptimizerApi(request.user)
    histories = []
    upload_stats = {}

    if request.POST.get('sourcePlatform'):
      upload_limit = OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get()
      n = min(int(request.POST.get('n', upload_limit)), upload_limit)
      source_platform = request.POST.get('sourcePlatform', 'hive')
      histories = [(source_platform, Document2.objects.get_history(doc_type='query-%s' % source_platform, user=request.user)[:n])]

    elif OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get() > 0:
      histories = [
        (source_platform, Document2.objects.filter(type='query-%s' % source_platform, is_history=True, is_managed=False, is_trashed=False).order_by('-last_modified')[:OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get()])
        for source_platform in ['hive', 'impala']
      ]

    for source_platform, history in histories:
      queries = _convert_queries([Notebook(document=doc).get_data() for doc in history])
      upload_stats[source_platform] = api.upload(data=queries, data_type='queries', source_platform=source_platform)

    response['upload_history'] = upload_stats
    response['status'] = 0
  else:
    response['message'] = _('Query history upload requires Admin privileges or feature is disabled.')

  return JsonResponse(response)
@require_POST
@error_handler
def upload_query(request):
  """Upload a single finished query to the optimizer, if auto-upload is enabled."""
  source_platform = request.POST.get('sourcePlatform', 'default')
  query_id = request.POST.get('query_id')

  should_upload = OPTIMIZER.AUTO_UPLOAD_QUERIES.get() and source_platform in ('hive', 'impala') and query_id

  if not should_upload:
    upload_result = _('Skipped')
  else:
    try:
      doc = Document2.objects.document(request.user, doc_id=query_id)
      query_data = Notebook(document=doc).get_data()

      queries = _convert_queries([query_data])
      source_platform = query_data['snippets'][0]['type']

      api = OptimizerApi(request.user)
      upload_result = api.upload(data=queries, data_type='queries', source_platform=source_platform)
    except Document2.DoesNotExist:
      upload_result = _('Skipped as task query')

  return JsonResponse({'status': 0, 'query_upload': upload_result})
@require_POST
@error_handler
def upload_table_stats(request):
  """Upload table DDL, table stats and column stats to the optimizer.

  Fixes:
  - the ``'[]'`` fallback now defaults the missing ``db_tables`` POST
    parameter instead of being passed to ``json.loads`` as its ``encoding``
    argument;
  - ``mock_request`` was only created in the table-stats branch but is also
    used by the column-stats branch, causing a NameError when only column
    stats were requested;
  - ``except ... as e`` / ``items()`` replace Python-2-only forms.
  """
  response = {'status': -1}

  db_tables = json.loads(request.POST.get('db_tables', '[]'))
  source_platform = json.loads(request.POST.get('sourcePlatform', '"hive"'))
  with_ddl = json.loads(request.POST.get('with_ddl', 'false'))
  with_table_stats = json.loads(request.POST.get('with_table', 'false'))
  with_columns_stats = json.loads(request.POST.get('with_columns', 'false'))

  table_ddls = []
  table_stats = []
  column_stats = []

  if not OPTIMIZER.AUTO_UPLOAD_DDL.get():
    with_ddl = False

  if not OPTIMIZER.AUTO_UPLOAD_STATS.get():
    with_table_stats = with_columns_stats = False

  for db_table in db_tables:
    path = _get_table_name(db_table)

    try:
      if with_ddl:
        db = _get_db(request.user, source_type=source_platform)
        query = hql_query('SHOW CREATE TABLE `%(database)s`.`%(table)s`' % path)
        handle = db.execute_and_wait(query, timeout_sec=5.0)

        if handle:
          result = db.fetch(handle, rows=5000)
          db.close(handle)
          table_ddls.append((0, 0, ' '.join([row[0] for row in result.rows()]), path['database']))

      # Created unconditionally: previously only built in the table-stats
      # branch, which broke column-stats-only requests with a NameError.
      mock_request = MockRequest(user=request.user, source_platform=source_platform)

      if with_table_stats:
        full_table_stats = json.loads(get_table_stats(mock_request, database=path['database'], table=path['table']).content)
        stats = dict((stat['data_type'], stat['comment']) for stat in full_table_stats['stats'])

        table_stats.append({
          'table_name': '%(database)s.%(table)s' % path, # DB Prefix
          'num_rows': stats.get('numRows', -1),
          'last_modified_time': stats.get('transient_lastDdlTime', -1),
          'total_size': stats.get('totalSize', -1),
          'raw_data_size': stats.get('rawDataSize', -1),
          'num_files': stats.get('numFiles', -1),
          'num_partitions': stats.get('numPartitions', -1),
          # bytes_cached
          # cache_replication
          # format
        })

      if with_columns_stats:
        if source_platform == 'impala':
          colum_stats = json.loads(get_table_stats(mock_request, database=path['database'], table=path['table'], column=-1).content)['stats']
        else:
          # NOTE(review): this branch reads full_table_stats, which is only
          # computed when with_table_stats is enabled — Hive column stats
          # without table stats would still fail; confirm intended usage.
          colum_stats = [
            json.loads(get_table_stats(mock_request, database=path['database'], table=path['table'], column=col).content)['stats']
            for col in full_table_stats['columns'][:25]
          ]

        raw_column_stats = [dict([(key, val if val is not None else '') for col_stat in col for key, val in col_stat.items()]) for col in colum_stats]

        for col_stats in raw_column_stats:
          column_stats.append({
            'table_name': '%(database)s.%(table)s' % path, # DB Prefix
            'column_name': col_stats['col_name'],
            'data_type': col_stats['data_type'],
            "num_distinct": int(col_stats.get('distinct_count')) if col_stats.get('distinct_count') != '' else -1,
            "num_nulls": int(col_stats['num_nulls']) if col_stats['num_nulls'] != '' else -1,
            "avg_col_len": int(float(col_stats['avg_col_len'])) if col_stats['avg_col_len'] != '' else -1,
            "max_size": int(float(col_stats['max_col_len'])) if col_stats['max_col_len'] != '' else -1,
            "min": col_stats['min'] if col_stats.get('min', '') != '' else -1,
            "max": col_stats['max'] if col_stats.get('max', '') != '' else -1,
            "num_trues": col_stats['num_trues'] if col_stats.get('num_trues', '') != '' else -1,
            "num_falses": col_stats['num_falses'] if col_stats.get('num_falses', '') != '' else -1,
          })
    except Exception as e:
      LOG.exception('Skipping upload of %s: %s' % (db_table, e))

  api = OptimizerApi(request.user)

  response['status'] = 0

  if table_stats:
    response['upload_table_stats'] = api.upload(data=table_stats, data_type='table_stats', source_platform=source_platform)
    response['upload_table_stats_status'] = 0 if response['upload_table_stats']['status']['state'] in ('WAITING', 'FINISHED', 'IN_PROGRESS') else -1
    response['status'] = response['upload_table_stats_status']
  if column_stats:
    response['upload_cols_stats'] = api.upload(data=column_stats, data_type='cols_stats', source_platform=source_platform)
    response['upload_cols_stats_status'] = response['status'] if response['upload_cols_stats']['status']['state'] in ('WAITING', 'FINISHED', 'IN_PROGRESS') else -1
    if response['upload_cols_stats_status'] != 0:
      response['status'] = response['upload_cols_stats_status']
  if table_ddls:
    response['upload_table_ddl'] = api.upload(data=table_ddls, data_type='queries', source_platform=source_platform)
    response['upload_table_ddl_status'] = response['status'] if response['upload_table_ddl']['status']['state'] in ('WAITING', 'FINISHED', 'IN_PROGRESS') else -1
    if response['upload_table_ddl_status'] != 0:
      response['status'] = response['upload_table_ddl_status']

  return JsonResponse(response)
@require_POST
@error_handler
def upload_status(request):
  """Report the optimizer-side status of a previously submitted upload workload."""
  workload_id = request.POST.get('workloadId')

  status = OptimizerApi(request.user).upload_status(workload_id=workload_id)

  return JsonResponse({'status': 0, 'upload_status': status})
class MockRequest():
  """Minimal stand-in for a Django request, used when calling beeswax APIs directly."""

  def __init__(self, user, source_platform):
    self.user = user
    # Beeswax URLs carry no engine prefix; other engines use '/<engine>/'.
    if source_platform == 'hive':
      self.path = 'beeswax'
    else:
      self.path = '/%s/' % source_platform
| desktop/libs/metadata/src/metadata/optimizer_api.py | 16,037 | !/usr/bin/env python Licensed to Cloudera, Inc. under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Cloudera, Inc. licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Unused Not failed query unpack_guid uses '%016x:%016x' while optmizer api uses '%s:%s'. DB Prefix bytes_cached cache_replication format DB Prefix | 905 | en | 0.799255 |
# -*- coding: utf-8 -*-
"""
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2019-02-24 10:47:53
:author: wushuiyong@walle-web.io
"""
import os
import re
import os.path as osp
import git as PyGit
from git import Repo as PyRepo
class Repo:
    """Thin GitPython wrapper used by walle to manage local project checkouts."""

    path = None

    def __init__(self, path=None):
        self.path = path

    def is_git_dir(self):
        '''
        Check whether ``self.path`` looks like a regular git checkout.

        :return: True for a normal working copy with a ``.git`` directory.
        '''
        d = self.path + '/.git'
        if osp.isdir(d):
            if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
                headref = osp.join(d, 'HEAD')
                return osp.isfile(headref) or \
                       (osp.islink(headref) and
                        os.readlink(headref).startswith('refs'))
            elif (osp.isfile(osp.join(d, 'gitdir')) and
                  osp.isfile(osp.join(d, 'commondir')) and
                  osp.isfile(osp.join(d, 'gitfile'))):
                # Linked/separated worktree layout: deliberately not treated as a checkout here.
                return False
        return False

    def init(self, url):
        '''
        Ensure a checkout of ``url`` exists at ``self.path``: pull when the
        directory is already a git checkout, otherwise clone.
        '''
        # Create the target directory if needed.
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        # git clone (or update an existing checkout)
        if self.is_git_dir():
            return self.pull()
        else:
            return self.clone(url)

    def clone(self, url):
        '''
        Clone the project from ``url`` into ``self.path``.
        '''
        return PyRepo.clone_from(url, self.path)

    def pull(self):
        '''
        Update the existing checkout from its default remote.
        '''
        repo = PyRepo(self.path)
        return repo.remote().pull()

    def checkout_2_branch(self, branch):
        '''Check out the given branch.'''
        PyRepo(self.path).git.checkout(branch)

    def checkout_2_commit(self, branch, commit):
        '''
        Check out ``branch`` and move HEAD to ``commit``.

        TODO: incomplete — ``set_commit`` only resets HEAD; it does not
        update the working tree like a real checkout would.
        '''
        PyRepo(self.path).git.checkout(branch)
        # PyRepo(self.path).head.set_reference(branch)
        PyRepo(self.path).head.set_commit(commit)

    def checkout_2_tag(self, tag):
        '''Check out the given tag.'''
        PyRepo(self.path).git.checkout(tag)

    def branches(self):
        '''
        List all remote branch names, with the remote prefix removed and the
        symbolic ``origin/HEAD`` pointer excluded.
        '''
        branches = PyRepo(self.path).remote().refs
        # fixbug https://github.com/meolu/walle-web/issues/705
        return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if
                not str(branch).strip().startswith('origin/HEAD')]

    def tags(self):
        '''
        List all tag names of the repository.
        '''
        return [str(tag) for tag in PyRepo(self.path).tags]

    def commits(self, branch):
        '''
        List up to 50 recent commits of ``branch`` as
        ``{'id', 'name', 'message'}`` dicts.
        '''
        self.checkout_2_branch(branch)

        commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50)
        commit_list = commit_log.split('\n')
        commits = []

        # Hoisted out of the loop: the original re-imported flask for every commit.
        from flask import current_app

        for commit in commit_list:
            if not re.search('^.+ #@_@# .+ #@_@# .*$', commit):
                continue

            commit_dict = commit.split(' #@_@# ')
            current_app.logger.info(commit_dict)
            commits.append({
                'id': commit_dict[0],
                'name': commit_dict[1],
                'message': commit_dict[2],
            })
        return commits
| walle/service/git/repo.py | 3,713 | 获取所有分支
@param branch:
@param kwargs:
@return:
@todo 未完成
@param branch:
@param commit:
@return:
检出项目
@param branch:
@param kwargs:
@return:
获取分支的commits
@param branch:
@param kwargs:
@return:
判断是否为git目录
@param path:
@return:
更新项目
@param branch:
@param kwargs:
@return:
获取所有tag
@param branch:
@param kwargs:
@return:
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2019-02-24 10:47:53
:author: wushuiyong@walle-web.io
-*- coding: utf-8 -*- 创建目录 git clone PyRepo(self.path).head.set_reference(branch) 方法有问题,只是做了reset,没有checkout 去除 origin/HEAD -> 当前指向 去除远端前缀 fixbug https://github.com/meolu/walle-web/issues/705 | 630 | zh | 0.220087 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import logging
import os
import time
import isodate
from testlib import mini_poster
logger = logging.getLogger(__name__)
def utc_now():
    """Return the current time as a timezone-aware datetime in UTC."""
    return datetime.datetime.now(tz=isodate.UTC)
class CrashVerifier:
    """Checks that a submitted crash made it into storage and pub/sub."""

    def raw_crash_key(self, crash_id):
        """Build the storage key of the raw crash record."""
        entropy = crash_id[0:3]
        date = '20' + crash_id[-6:]
        return f'v2/raw_crash/{entropy}/{date}/{crash_id}'

    def dump_names_key(self, crash_id):
        """Build the storage key of the dump-names record."""
        return f'v1/dump_names/{crash_id}'

    def dump_key(self, crash_id, name):
        """Build the storage key for one dump; unnamed dumps map to "dump"."""
        if name in (None, '', 'upload_file_minidump'):
            name = 'dump'
        return f'v1/{name}/{crash_id}'

    def verify_stored_data(self, crash_id, raw_crash, dumps, s3conn):
        """Assert the raw crash, the dump names and every dump are in S3."""
        raw_key = self.raw_crash_key(crash_id)
        assert raw_key in s3conn.list_objects(prefix=raw_key)

        names_key = self.dump_names_key(crash_id)
        assert names_key in s3conn.list_objects(prefix=names_key)

        for dump_name in dumps:
            key = self.dump_key(crash_id, dump_name)
            assert key in s3conn.list_objects(prefix=key)

    def verify_published_data(self, crash_id, pubsub):
        """Assert the crash id was published.

        Publishing picks up a bunch of ids, so we only check membership;
        the check runs only against the Pub/Sub emulator.
        """
        if 'PUBSUB_EMULATOR_HOST' in os.environ:
            published = [cid.decode('utf-8') for cid in pubsub.list_crashids()]
            assert crash_id in published
        else:
            print('SKIPPING PUBLISH CHECK--NOT USING EMULATOR')
def content_to_crashid(content):
    """Extract the crash id from a ``CrashID=bp-<id>`` response body."""
    if not isinstance(content, str):
        content = str(content, encoding='utf-8')
    prefix = 'CrashID=bp-'
    return content.strip()[len(prefix):]
# Seconds to wait after posting so Antenna can save things before we check.
SLEEP_TIME = 5
class TestPostCrash:
    """End-to-end tests: post a crash, then verify storage and publishing."""

    def _post_and_verify(self, posturl, s3conn, pubsub, crash_generator, compressed=False):
        """Generate a crash, post it, and verify S3 storage plus pub/sub.

        The two public tests only differed in how the payload was posted,
        so the duplicated flow now lives here.

        :arg compressed: when True, post the payload gzip-compressed
            instead of as a multipart payload with dumps.
        """
        raw_crash, dumps = crash_generator.generate()
        crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps)
        if compressed:
            resp = mini_poster.post_crash(posturl, crash_payload, compressed=True)
        else:
            resp = mini_poster.post_crash(posturl, crash_payload, dumps)

        # Sleep to give Antenna time to save things
        time.sleep(SLEEP_TIME)

        crash_id = content_to_crashid(resp.content)
        logger.debug('Crash ID is: %s', crash_id)
        logger.debug('S3conn: %s', s3conn.get_config())

        # Verify stored and published crash data
        verifier = CrashVerifier()
        verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn)
        verifier.verify_published_data(crash_id, pubsub)

    def test_regular(self, posturl, s3conn, pubsub, crash_generator):
        """Post a valid crash and verify the contents made it to S3."""
        self._post_and_verify(posturl, s3conn, pubsub, crash_generator)

    def test_compressed_crash(self, posturl, s3conn, pubsub, crash_generator):
        """Post a compressed crash and verify contents made it to S3."""
        self._post_and_verify(posturl, s3conn, pubsub, crash_generator, compressed=True)
| tests/systemtest/test_post_crash.py | 3,937 | Post a compressed crash and verify contents made it to S3.
Post a valid crash and verify the contents made it to S3.
This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. Verify the raw crash file made it Verify the dump_names file made it Verify the dumps made it Verify crash id was published--this might pick up a bunch of stuff, so we just verify it's one of the things we picked up Gives Antenna time to save things before we check Sleep to give Antenna time to save things Verify stored and published crash data Sleep to give Antenna time to save things Verify stored and published crash data | 739 | en | 0.915402 |
import requests
import argparse
import logging
import coloredlogs
import threading
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
from kafka import KafkaConsumer
from threading import Thread
from threading import Timer
from datetime import timedelta
import psycopg2
import time
app = Flask(__name__)
logger = logging.getLogger("DCSRestClient")
# Signalling topics exchanged with the DCM (Data Collection Manager).
signalling_metric_infrastructure = {'expId': 'internal', 'topic': 'signalling.metric.infrastructure'}
signalling_metric_application = {'expId': 'internal', 'topic': 'signalling.metric.application'}
signalling_kpi = {'expId': 'internal', 'topic': 'signalling.kpi'}
# DCM endpoint pieces; the host is supplied at startup via --dcm_ip_address.
dcm_port = "8090"
dcm_subscribe_url = "/dcm/subscribe"
dcm_unsubscribe_url = "/dcm/unsubscribe"
# Local endpoint used to create/delete Kibana dashboards.
dcs_dashboard_url = "http://127.0.0.1:8080/portal/dcs/dashboard"
# Keeps the signalling consumer threads alive while True.
signalling_start = False
@app.route('/', methods=['GET'])
def server_status():
    """
    Get status.
    ---
    describe: get status
    responses:
      200:
        description: OK
    """
    # NOTE: the docstring above is parsed by flask-swagger; do not reformat it.
    logger.info("GET /")
    return '', 200
@app.route("/spec", methods=['GET'])
def spec():
    """
    Get swagger specification.
    ---
    describe: get swagger specification
    responses:
      swagger:
        description: swagger specification
    """
    # Build the swagger spec from every route docstring in this app.
    swag = swagger(app)
    swag['info']['version'] = "1.0"
    swag['info']['title'] = "DCS REST API"
    return jsonify(swag)
def kafka_consumer_refresh_dashboard_handler(topic, value):
    """Wait for the first message on *topic*, then create its dashboard.

    Runs in a background thread spawned by the subscribe flow.  *value* is
    the JSON-encoded signalling message used as the dashboard payload.
    """
    logger.info("Creating Kafka Consumer for %s topic", topic)
    consumer = KafkaConsumer(
        topic,
        bootstrap_servers=[dcm_ip_address + ":9092"],
        auto_offset_reset='earliest',
        enable_auto_commit=True,
        group_id=None,
        value_deserializer=lambda x: json.loads(x.decode('utf-8')))
    message_received = False
    # Poll until the first data message arrives on the topic.
    while not message_received:
        message = consumer.poll(timeout_ms=1000)
        if message != {}:
            logger.info("Message received in %s topic: %s", topic, message)
            message_received = True
        time.sleep(5)
    logger.info("Creating dashboard for topic: %s", topic)
    r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
    logger.info("Response: Code %s", r)
    # This call seems that is not needed as the dashboard is generated when data is present.
    #time.sleep(2)
    #logger.info("Refreshing dashboard for %s topic", topic)
    #subprocess.call(['/bin/bash', '/usr/bin/dcs/refresh_dashboard.sh', topic])
    logger.info("Closing Kafka Consumer for %s topic", topic)
    consumer.close()
def index_cleaner(topic, value):
    """Delete the dashboard and Elasticsearch index for *topic*.

    Scheduled by the unsubscribe flow to run after the retention period.
    """
    logger.info("Time to delete the dashboard for topic %s", topic)
    r = requests.delete(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
    logger.info("Response: Code %s", r)
    logger.info("Time to delete the Elasticsearch index for topic %s", topic)
    # 'yes' presumably tells the script to also drop the ES index — TODO confirm.
    subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'yes'])
def kafka_consumer_signalling_topic_handler(signalling_topic_data):
    """Consume a signalling topic and manage Logstash pipelines accordingly.

    For every "subscribe" message: create the Logstash pipeline, spawn a
    thread that builds the dashboard once data arrives, and record the
    topic in the DB.  For "unsubscribe": delete the pipeline, schedule
    data removal after the retention period, and delete the topic from
    the DB.  Loops until the global ``signalling_start`` flag is cleared.
    """
    logger.info("Creating Kafka Consumer for %s topic", signalling_topic_data["topic"])
    consumer = KafkaConsumer(
        signalling_topic_data["topic"],
        bootstrap_servers=[dcm_ip_address + ":9092"],
        auto_offset_reset='earliest',
        enable_auto_commit=True,
        group_id=None,
        value_deserializer=lambda x: json.loads(x.decode('utf-8')))
    while signalling_start:
        message = consumer.poll(timeout_ms=1000)
        if message != {}:
            logger.info("Message received in %s topic: %s", signalling_topic_data["topic"], message)
            for tp, messages in message.items():
                for msg in messages:
                    logger.info("Value: %s", msg.value)
                    topic = json.loads(msg.value)["topic"]
                    if json.loads(msg.value)["action"] == "subscribe":
                        logger.info("Create Logstash pipeline for topic %s", topic)
                        subprocess.call(['/bin/bash', '/usr/bin/dcs/create_logstash_pipeline.sh', topic])
                        # Dashboard creation is commented because it will be created when data is published in the topic.
                        #r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(msg.value) }]})
                        #logger.info("Response: Code %s", r)
                        # Create Kafka consumer to wait for the first message received in the topic and, then, refresh the dashboard.
                        thread = threading.Thread(target = kafka_consumer_refresh_dashboard_handler, args = [topic, msg.value])
                        thread.start()
                        # Finally, save topic in DB
                        connection = None
                        try:
                            connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
                            logger.info("Inserting %s topic in database", topic)
                            cursor = connection.cursor()
                            cursor.execute("INSERT INTO pipeline VALUES ( %s )", (topic,))
                            connection.commit()
                            logger.info("Topic %s inserted in database", topic)
                            cursor.close()
                        except (Exception, psycopg2.Error) as error:
                            # Bug fix: the message had no %s placeholder, so the
                            # exception was never rendered into the log line.
                            logger.error("Error while connecting to PostgreSQL: %s", error)
                        finally:
                            # Bug fix: release the connection even when the
                            # insert fails (it used to leak on error).
                            if connection is not None:
                                connection.close()
                    elif json.loads(msg.value)["action"] == "unsubscribe":
                        logger.info("Delete Logstash pipeline for topic %s", topic)
                        subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'no'])
                        # Schedule the removal of Kibana dashboard and Elasticsearch index (retention time of 14 days)
                        scheduled_thread = threading.Timer(timedelta(days=14).total_seconds(), index_cleaner, args = [topic, msg.value])
                        # This call is for testing purposes, to be commented when unused:
                        #scheduled_thread = threading.Timer(timedelta(seconds=30).total_seconds(), index_cleaner, args = [topic, msg.value])
                        scheduled_thread.start()
                        logger.info("Data removal for topic %s scheduled in 14 days", topic)
                        # Finally, delete topic in DB
                        connection = None
                        try:
                            connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
                            logger.info("Deleting %s topic in database", topic)
                            cursor = connection.cursor()
                            cursor.execute("DELETE FROM pipeline WHERE topic = %s", (topic,))
                            connection.commit()
                            logger.info("Topic %s deleted in database", topic)
                            cursor.close()
                        except (Exception, psycopg2.Error) as error:
                            # Bug fix: same missing %s placeholder as above.
                            logger.error("Error while connecting to PostgreSQL: %s", error)
                        finally:
                            if connection is not None:
                                connection.close()
                    else:
                        logger.error("Action not allowed")
    logger.info("Closing Kafka Consumer for %s topic", signalling_topic_data["topic"])
    consumer.close()
def start_consuming_signalling_topic(signalling_topic_data):
    """Subscribe to a signalling topic on the DCM and spawn its consumer.

    *signalling_topic_data* is a JSON string with ``expId`` and ``topic``.
    Sets the global ``signalling_start`` flag so the consumer loop runs.
    """
    signalling_topic_data = json.loads(signalling_topic_data)
    logger.info("Starting %s topic", signalling_topic_data["topic"])
    logger.info("Sending POST request to %s", url_subscribe)
    # Send the request to the DCM.
    r = requests.post(url_subscribe, json=signalling_topic_data)
    logger.info("Response: Code %s", r)
    # Create Kafka consumer.
    global signalling_start
    signalling_start = True
    thread = threading.Thread(target = kafka_consumer_signalling_topic_handler, args = [signalling_topic_data])
    thread.start()
@app.route('/portal/dcs/start_signalling/', methods=['POST'])
def start_dcs():
    """
    Start signalling topics.
    ---
    describe: start signalling topics
    responses:
      201:
        description: accepted request
      400:
        description: error processing the request
    """
    # NOTE: the docstring above is parsed by flask-swagger; do not reformat it.
    logger.info("Request received - POST /portal/dcs/start_signalling/")
    try:
        # Subscribe to all three internal signalling topics.
        start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
        start_consuming_signalling_topic(json.dumps(signalling_metric_application))
        start_consuming_signalling_topic(json.dumps(signalling_kpi))
    except Exception as e:
        logger.error("Error while parsing request")
        logger.exception(e)
        return str(e), 400
    return '', 201
def stop_consuming_signalling_topic(signalling_topic_data):
    """Unsubscribe a signalling topic on the DCM and stop the consumers.

    Clearing the global ``signalling_start`` flag makes every consumer
    thread exit its polling loop.
    """
    signalling_topic_data = json.loads(signalling_topic_data)
    logger.info("Stopping %s topic", signalling_topic_data["topic"])
    logger.info("Sending DELETE request to %s", url_unsubscribe)
    # Send the request to the DCM.
    r = requests.delete(url_unsubscribe, json=signalling_topic_data)
    logger.info("Response: Code %s", r)
    # Delete Kafka consumer.
    global signalling_start
    # Put signalling_start to False, and then threads will finish their execution.
    signalling_start = False
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE'])
def stop_dcs():
    """
    Stop signalling topics.
    ---
    describe: stop signalling topics
    responses:
      201:
        description: accepted request
      400:
        description: error processing the request
    """
    # NOTE: the docstring above is parsed by flask-swagger; do not reformat it.
    logger.info("Request received - DELETE /portal/dcs/stop_signalling/")
    try:
        # Unsubscribe the three internal signalling topics.
        stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
        stop_consuming_signalling_topic(json.dumps(signalling_metric_application))
        stop_consuming_signalling_topic(json.dumps(signalling_kpi))
    except Exception as e:
        logger.error("Error while parsing request")
        logger.exception(e)
        return str(e), 400
    return '', 201
def checkValidPort(value):
    """argparse ``type=`` validator for TCP port numbers.

    Returns the port as an int (bug fix: the original validated the int
    but returned the raw string).  Raises ``argparse.ArgumentTypeError``
    for non-numeric input or out-of-range values so argparse can report
    a consistent error message.
    """
    try:
        ivalue = int(value)
    except ValueError:
        raise argparse.ArgumentTypeError("%s is not a valid port" % value)
    # RFC 793: valid port range is 0-65535.
    if ivalue < 0 or ivalue > 65535:
        raise argparse.ArgumentTypeError("%s is not a valid port" % value)
    return ivalue
if __name__ == "__main__":
    # Parse command-line options for the DCS REST client service.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dcm_ip_address",
        help='DCM IP address, default IP is localhost',
        default='localhost')
    parser.add_argument(
        "--eve_db_password",
        help='DB password for eve user')
    parser.add_argument(
        "--port",
        type=checkValidPort,
        help='The port you want to use as an endpoint, default port is 8091',
        default="8091")
    parser.add_argument(
        "--log",
        help='Sets the Log Level output, default level is "info"',
        choices=[
            "info",
            "debug",
            "error",
            "warning"],
        nargs='?',
        default='info')
    args = parser.parse_args()
    numeric_level = getattr(logging, str(args.log).upper(), None)
    if not isinstance(numeric_level, int):
        # Bug fix: the original referenced an undefined name ``loglevel``
        # here, turning a bad --log value into a NameError.
        raise ValueError('Invalid log level: %s' % args.log)
    coloredlogs.install(
        fmt='%(asctime)s %(levelname)s %(message)s',
        datefmt='%d/%m/%Y %H:%M:%S',
        level=numeric_level)
    logging.basicConfig(filename='/var/log/dcs_rest_client.log')
    logging.getLogger("DCSRestClient").setLevel(numeric_level)
    logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
    # Cleanup: a redundant second parser.parse_args() call and the no-op
    # module-level ``global`` statements were removed.
    logger.info("Serving DCSRestClient on port %s", str(args.port))
    dcm_ip_address = str(args.dcm_ip_address)
    url_subscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_subscribe_url
    url_unsubscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_unsubscribe_url
    eve_db_password = str(args.eve_db_password)
    #TODO: advanced feature - connect to the database and make sure that Logstash pipelines are created for the topics saved in the DB.
    serve(app, host='0.0.0.0', port=args.port)
| dcs_rest_client.py | 12,429 | Get status.
---
describe: get status
responses:
200:
description: OK
Get swagger specification.
---
describe: get swagger specification
responses:
swagger:
description: swagger specification
Start signalling topics.
---
describe: start signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
Stop signalling topics.
---
describe: stop signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
This call seems that is not needed as the dashboard is generated when data is present.time.sleep(2) logger.info("Refreshing dashboard for %s topic", topic)subprocess.call(['/bin/bash', '/usr/bin/dcs/refresh_dashboard.sh', topic]) Dashboard creation is commented because it will be created when data is published in the topic.r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(msg.value) }]})logger.info("Response: Code %s", r) Create Kafka consumer to wait for the first message received in the topic and, then, refresh the dashboard. Finally, save topic in DB Schedule the removal of Kibana dashboard and Elasticsearch index (retention time of 14 days) This call is for testing purposes, to be commented when unused:scheduled_thread = threading.Timer(timedelta(seconds=30).total_seconds(), index_cleaner, args = [topic, msg.value]) Finally, delete topic in DB Send the request to the DCM. Create Kafka consumer. Send the request to the DCM. Delete Kafka consumer. Put signalling_start to False, and then threads will finish their execution. RFC 793TODO: advanced feature - connect to the database and make sure that Logstash pipelines are created for the topics saved in the DB. | 1,756 | en | 0.800227 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 19:14:52 2020
@author: prachi
"""
import pickle
import numpy as np
# Paths of the DER score table (input) and the pickled dict (output).
der = 'swbd_diar/exp_new/callhome/plda_oracle/der.scp'
der_pickle = 'swbd_diar/exp_new/callhome/plda_oracle/derdict'

# Read the score table with a context manager (bug fix: the original
# leaked the input file handle by never closing it).
with open(der, 'r') as der_file:
    lines = der_file.readlines()

# Skip the two header lines and the trailing overall-average line;
# each remaining row is "<filename> <DER> ...".
DER = {}
for line in lines[2:-1]:
    parts = line.split()
    DER[parts[0]] = float(parts[1])

# Persist the mapping; the with-statement guarantees the handle is
# closed even if pickling fails.
with open(der_pickle, 'wb') as pickleobj:
    pickle.dump(DER, pickleobj)
| services/gen_der_dict.py | 501 | Created on Tue Jan 21 19:14:52 2020
@author: prachi
!/usr/bin/env python3 -*- coding: utf-8 -*- | 97 | en | 0.598251 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from six import StringIO
from pants.base.workunit import WorkUnitLabel
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.invalidation_report import InvalidationReport
from pants.reporting.plaintext_reporter import LabelFormat, PlainTextReporter, ToolOutputFormat
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report
from pants.reporting.reporter import ReporterDestination
from pants.reporting.reporting_server import ReportingServerManager
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import relative_symlink, safe_mkdir
class Reporting(Subsystem):
  """Sets up and reconfigures the reporters (console, HTML, logfile)
  that cover a pants run."""
  options_scope = 'reporting'

  @classmethod
  def register_options(cls, register):
    """Register reporting-related options on the given registrar."""
    super(Reporting, cls).register_options(register)
    register('--invalidation-report', type=bool,
             help='Write a formatted report on the invalid objects to the specified path.')
    register('--reports-dir', advanced=True, metavar='<dir>',
             default=os.path.join(register.bootstrap.pants_workdir, 'reports'),
             help='Write reports to this dir.')
    register('--template-dir', advanced=True, metavar='<dir>', default=None,
             help='Find templates for rendering in this dir.')
    register('--console-label-format', advanced=True, type=dict,
             default=PlainTextReporter.LABEL_FORMATTING,
             help='Controls the printing of workunit labels to the console. Workunit types are '
                  '{workunits}. Possible formatting values are {formats}'.format(
                    workunits=WorkUnitLabel.keys(), formats=LabelFormat.keys()))
    register('--console-tool-output-format', advanced=True, type=dict,
             default=PlainTextReporter.TOOL_OUTPUT_FORMATTING,
             help='Controls the printing of workunit tool output to the console. Workunit types are '
                  '{workunits}. Possible formatting values are {formats}'.format(
                    workunits=WorkUnitLabel.keys(), formats=ToolOutputFormat.keys()))

  def initialize(self, run_tracker, start_time=None):
    """Initialize with the given RunTracker.

    TODO: See `RunTracker.start`.
    """
    run_id = run_tracker.initialize()
    run_dir = os.path.join(self.get_options().reports_dir, run_id)

    html_dir = os.path.join(run_dir, 'html')
    safe_mkdir(html_dir)
    # 'latest' always points at the report dir of the most recent run.
    relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))

    report = Report()

    # Capture initial console reporting into a buffer. We'll do something with it once
    # we know what the cmd-line flag settings are.
    outfile = StringIO()
    errfile = StringIO()
    capturing_reporter_settings = PlainTextReporter.Settings(
      outfile=outfile, errfile=errfile, log_level=Report.INFO,
      color=False, indent=True, timing=False,
      cache_stats=False,
      label_format=self.get_options().console_label_format,
      tool_output_format=self.get_options().console_tool_output_format)
    capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
    report.add_reporter('capturing', capturing_reporter)

    # Set up HTML reporting. We always want that.
    html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                                   html_dir=html_dir,
                                                   template_dir=self.get_options().template_dir)
    html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
    report.add_reporter('html', html_reporter)

    # Add some useful RunInfo.
    run_tracker.run_info.add_info('default_report', html_reporter.report_path())
    port = ReportingServerManager().socket
    if port:
      run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))

    # And start tracking the run.
    run_tracker.start(report, start_time)

  def _get_invalidation_report(self):
    """Return a fresh InvalidationReport when enabled by options, else None."""
    return InvalidationReport() if self.get_options().invalidation_report else None

  @staticmethod
  def _consume_stringio(f):
    """Flush and close StringIO *f*, returning everything written to it."""
    f.flush()
    buffered_output = f.getvalue()
    f.close()
    return buffered_output

  def update_reporting(self, global_options, is_quiet, run_tracker):
    """Updates reporting config once we've parsed cmd-line flags."""

    # Get any output silently buffered in the old console reporter, and remove it.
    removed_reporter = run_tracker.report.remove_reporter('capturing')
    buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
    buffered_err = self._consume_stringio(removed_reporter.settings.errfile)

    log_level = Report.log_level_from_string(global_options.level or 'info')
    # Ideally, we'd use terminfo or somesuch to discover whether a
    # terminal truly supports color, but most that don't set TERM=dumb.
    color = global_options.colors and (os.getenv('TERM') != 'dumb')
    timing = global_options.time
    cache_stats = global_options.time  # TODO: Separate flag for this?

    if is_quiet:
      console_reporter = QuietReporter(run_tracker,
                                       QuietReporter.Settings(log_level=log_level, color=color,
                                                              timing=timing, cache_stats=cache_stats))
    else:
      # Set up the new console reporter.
      settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, errfile=sys.stderr,
                                            color=color, indent=True, timing=timing, cache_stats=cache_stats,
                                            label_format=self.get_options().console_label_format,
                                            tool_output_format=self.get_options().console_tool_output_format)
      console_reporter = PlainTextReporter(run_tracker, settings)
      # Replay whatever was captured before the flags were known.
      console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
      console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
      console_reporter.flush()
    run_tracker.report.add_reporter('console', console_reporter)

    if global_options.logdir:
      # Also write plaintext logs to a file. This is completely separate from the html reports.
      safe_mkdir(global_options.logdir)
      run_id = run_tracker.run_info.get_info('id')
      outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'w')
      errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'w')
      settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, errfile=errfile,
                                            color=False, indent=True, timing=True, cache_stats=True,
                                            label_format=self.get_options().console_label_format,
                                            tool_output_format=self.get_options().console_tool_output_format)
      logfile_reporter = PlainTextReporter(run_tracker, settings)
      logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
      logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
      logfile_reporter.flush()
      run_tracker.report.add_reporter('logfile', logfile_reporter)

    invalidation_report = self._get_invalidation_report()
    if invalidation_report:
      run_id = run_tracker.run_info.get_info('id')
      outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
      invalidation_report.set_filename(outfile)

    return invalidation_report
| src/python/pants/reporting/reporting.py | 7,709 | Initialize with the given RunTracker.
TODO: See `RunTracker.start`.
Updates reporting config once we've parsed cmd-line flags.
coding=utf-8 Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). Licensed under the Apache License, Version 2.0 (see LICENSE). Capture initial console reporting into a buffer. We'll do something with it once we know what the cmd-line flag settings are. Set up HTML reporting. We always want that. Add some useful RunInfo. And start tracking the run. Get any output silently buffered in the old console reporter, and remove it. Ideally, we'd use terminfo or somesuch to discover whether a terminal truly supports color, but most that don't set TERM=dumb. TODO: Separate flag for this? Set up the new console reporter. Also write plaintext logs to a file. This is completely separate from the html reports. | 847 | en | 0.851744 |
import numpy as np
import pygame as pg
from numba import njit
def main():
    """Run the ray-traced maze game: set up the world and window, run the
    main loop (events, rendering, enemy AI, movement), then tear down."""
    # --- world / player / enemy state -------------------------------------
    size = np.random.randint(20,60) # size of the map
    posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
    rot, rot_v = (np.pi/4, 0)
    lx, ly, lz = (size*20, size*30, 1000)  # light position
    mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
    enx, eny, seenx, seeny, lock = np.random.uniform(2, size-3 ), np.random.uniform(2, size-3), 0, 0, 0
    maph[int(enx)][int(eny)] = 0
    shoot, sx, sy, sdir = 1, -1, -1, rot
    # res_o: selectable render widths; width/height/etc come from adjust_resol.
    res, res_o = 5, [96, 112, 160, 192, 224, 260, 300, 340, 400, 480, 540, 600, 800]
    width, height, mod, inc, rr, gg, bb = adjust_resol(24)
    running = True
    # --- pygame / loading screen ------------------------------------------
    pg.init()
    font = pg.font.SysFont("Arial", 18)
    font2 = pg.font.SysFont("Impact", 48)
    screen = pg.display.set_mode((800, 600))
    rr, gg, bb = np.linspace(0,0.8, width*height), np.linspace(0.5,.1, width*height), np.linspace(1,0.1, width*height)
    pixels = np.dstack((rr,gg,bb))
    pixels = np.reshape(pixels, (height,width,3))
    surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
    surf = pg.transform.scale(surf, (750, 550))
    screen.blit(surf, (25, 25))
    screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,95))
    screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,105))
    screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,100))
    screen.blit(font2.render(" Loading, please wait... ", 1, pg.Color("black"), pg.Color("grey")),(50,300))
    pg.display.update()
    clock = pg.time.Clock()
    pg.mouse.set_visible(False)
    et = 0.1
    mplayer = np.zeros([size, size])
    enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
    sstart, timer, count, autores, smooth = None, 0, -100, 1, 0
    pause = 0
    # --- sound ------------------------------------------------------------
    pg.mixer.set_num_channels(3)
    ambient = pg.mixer.Sound('soundfx/HauntSilentPartner.mp3')
    ambient.set_volume(0.5)
    runfx = pg.mixer.Sound('soundfx/run.mp3')
    shotfx = pg.mixer.Sound('soundfx/slap.mp3')
    killfx = pg.mixer.Sound('soundfx/shutdown.mp3')
    respawnfx = pg.mixer.Sound('soundfx/respawn.mp3')
    successfx = pg.mixer.Sound('soundfx/success.mp3')
    failfx = pg.mixer.Sound('soundfx/fail.mp3')
    pg.mixer.Channel(0).play(ambient, -1)
    pg.mixer.Channel(1).play(respawnfx)
    run = 1
    score = 0
    ticks = pg.time.get_ticks()/100000
    # --- main loop --------------------------------------------------------
    while running:
        count += 1
        for event in pg.event.get():
            if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                if not pause:
                    pause = 1
                    pg.mixer.Channel(1).play(respawnfx)
                    endmsg = " Game paused. Current score: " + str(score)
                else:
                    endmsg = " Thanks for playing! Total score: " + str(score)
                    pg.mixer.Channel(1).play(killfx)
                    running = False
            if sstart == None and(event.type == pg.MOUSEBUTTONDOWN or event.type == pg.MOUSEBUTTONUP):
                shoot = 1
            if event.type == pg.KEYDOWN:
                if event.key == ord('p'): # pause
                    if not pause:
                        pause = 1
                        endmsg = " Game paused. Current score: " + str(score)
                    elif (int(posx) != exitx or int(posy) != exity):
                        pause = 0
                if pause and event.key == ord('n'): # new game
                    pause = 0
                    size = np.random.randint(20,60)
                    posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
                    rot, rot_v = (np.pi/4, 0)
                    mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
                    enx, eny, seenx, seeny, lock, run = 0, 0, 0, 0, 0, 1
                    shoot, sx, sy, sstart = 0, -1, -1, None
                    mplayer = np.zeros([size, size])
                    et = 0.1
                    enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
                    count = -100
                    if autores:
                        width, height, mod, inc, rr, gg, bb = adjust_resol(24)
                    pg.mixer.Channel(1).play(respawnfx)
                if event.key == ord('t'): # toggle auto resolution
                    autores = not(autores)
                if event.key == ord('y'): # toggle auto resolution
                    smooth = not(smooth)
                if not autores:
                    if event.key == ord('q'): # manually change resolution
                        if res > 0 :
                            res = res-1
                            width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
                    if event.key == ord('e'):
                        if res < len(res_o)-1 :
                            res = res+1
                            width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
        if not pause:
            # Ray-trace the frame at the internal resolution, then upscale.
            rr, gg, bb = super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, size)
            pixels = np.dstack((rr,gg,bb))
            pixels = np.reshape(pixels, (height,width,3))
            surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
            if shoot or smooth:
                surf = pg.transform.smoothscale(surf, (800, 600))
            else:
                surf = pg.transform.scale(surf, (800, 600))
            screen.blit(surf, (0, 0))
            ## fpss = int(clock.get_fps())pg.time.get_ticks()/100000
            fpss = int(1000/(pg.time.get_ticks() - ticks*100000))
            fps = font.render(str(fpss)+' w: '+ str(width) + ' Score: '+str(score), 1, pg.Color("coral"))
            screen.blit(fps,(10,0))
            if autores and count > 10: #auto adjust render resolution
                if fpss < 50 and width > 100:
                    count = 0
                    width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
                if fpss > 65 and width < 728:
                    count = 0
                    width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*1.1))
            # player's movement
            if (int(posx) == exitx and int(posy) == exity):
                endmsg = " You escaped safely! "
                pg.mixer.Channel(1).play(successfx)
                score += 1
                pause = 1
            pressed_keys = pg.key.get_pressed()
            et = clock.tick()/500
            if et > 0.5:
                et = 0.5
            # Shot handling: start the shot timer, drop resolution if slow,
            # and clear the shot after 500 ms.
            if shoot or sstart != None:
                if sstart == None:
                    pg.mixer.Channel(2).play(shotfx)
                    if fpss < 60 and autores:
                        count = -50
                        width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
                    sstart = pg.time.get_ticks()
                elif pg.time.get_ticks() - sstart > 500:
                    shoot, sx, sy, sstart = 0, -1, -1, None
            # Enemy respawn (enx == 0 means no active enemy) and chase logic.
            if enx == 0:
                if not run:
                    pg.mixer.Channel(1).play(killfx)
                    run = 1
                if np.random.uniform() > 0.999:
                    cos, sin = np.cos(rot), np.sin(rot)
                    for ee in range(100):
                        enx = np.clip(np.random.normal(posx, 5), 1, size-2)
                        eny = np.clip(np.random.normal(posy, 5), 1, size-2)
                        dtp = (enx-posx)**2 + (eny-posy)**2
                        if maph[int(enx)][int(eny)] == 0 and dtp > 16 and dtp < 49:
                            break
                    if maph[int(enx)][int(eny)] != 0:
                        enx, eny = 0, 0
                    else:
                        seenx, seeny, lock = enx, eny, 0
                        screen.blit(font2.render(" Enemy Respawning! ", 1, pg.Color("red"), pg.Color("grey")),(300,50))
                        pg.mixer.Channel(1).play(respawnfx)
            else:
                dtp = (enx-posx)**2 + (eny-posy)**2
                if dtp < 1:
                    # Enemy caught the player.
                    score -= 1
                    endmsg = " You died! Current score: " + str(score)
                    pg.mixer.Channel(1).play(failfx)
                    enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
                    pause = 1
                    surf = pg.surfarray.make_surface((np.rot90(255-pixels*255)).astype('uint8'))
                    surf = pg.transform.smoothscale(surf, (800, 600))
                    screen.blit(surf, (0, 0))
                elif dtp > 300:
                    # Enemy too far away; despawn it.
                    enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
                    run = 0
            # Orbit the light source and advance player/agents.
            ticks = pg.time.get_ticks()/100000
            lx = size/2 + 1000*np.cos(ticks)
            ly = size/2 + 1000*np.sin(ticks)
            posx, posy, rot, rot_v, shoot = movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart)
            pg.mouse.set_pos([400, 300])
            mplayer = np.zeros([size, size])
            enx, eny, mplayer, et, shoot, sx, sy, sdir,seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
            if run and (seenx == posx or seeny == posy):
                run = False
                pg.mixer.Channel(1).play(runfx)
        else:
            # Pause / menu screen.
            clock.tick(30)
            screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,45))
            screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,55))
            screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,50))
            screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
            if (int(posx) == exitx and int(posy) == exity):
                screen.blit(font2.render(" Your current score is "+str(score), 1, pg.Color("grey"), (80, 34, 80)),(50,390))
            else:
                screen.blit(font2.render(" Press P to continue ", 1, pg.Color("grey"), (80, 34, 80)),(50,390))
            screen.blit(font2.render(" Press N for a new game ", 1, pg.Color("grey"), (45, 34, 100)),(50,460))
            screen.blit(font2.render(" Press ESC to leave ", 1, pg.Color("grey"), (13, 34, 139)),(50,530))
        pg.display.update()
    # --- teardown ---------------------------------------------------------
    screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
    pg.mixer.fadeout(1000)
    pg.display.update()
    print(endmsg)
    pg.time.wait(2000)
    pg.quit()
def maze_generator(x, y, size):
    """Generate a random maze and return its per-cell layers.

    Starting at cell (x, y), a drunken-walk carves open (height-0)
    corridors until the walk reaches column size-2, which becomes the
    exit cell.

    Returns (mr, mg, mb, maph, mapr, exitx, exity, mapt, maps):
    wall colour channels, wall heights, mirror flags, exit coordinates,
    texture indices and sphere flags — each a (size, size) array.
    """
    mr = np.random.uniform(0,1, (size,size))  # wall red channel
    mg = np.random.uniform(0,1, (size,size))  # wall green channel
    mb = np.random.uniform(0,1, (size,size))  # wall blue channel
    mapr = np.random.choice([0, 0, 0, 0, 1], (size,size))  # ~1 in 5 cells reflective
    maps = np.random.choice([0, 0, 0, 0, 1], (size,size))  # ~1 in 5 cells hold a sphere
    mapt = np.random.choice([0, 0, 0, 1, 2], (size,size))  # texture selector per cell
    maptemp = np.random.choice([0,0, 1], (size,size))
    maph = np.random.uniform(0.25, 0.99, (size,size))
    maph[np.where(maptemp == 0)] = 0  # ~2/3 of cells start open before carving
    # Solid outer border, with no spheres on it.
    maph[0,:], maph[size-1,:], maph[:,0], maph[:,size-1] = (1,1,1,1)
    maps[0,:], maps[size-1,:], maps[:,0], maps[:,size-1] = (0,0,0,0)
    maph[x][y], mapr[x][y] = (0, 0)  # clear the starting cell
    count = 0  # consecutive blocked attempts since the last carve
    while 1:
        testx, testy = (x, y)
        # Step one cell in a random cardinal direction.
        if np.random.uniform() > 0.5:
            testx = testx + np.random.choice([-1, 1])
        else:
            testy = testy + np.random.choice([-1, 1])
        if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
            # Move into open cells; after 5 failed tries, tunnel through a wall.
            if maph[testx][testy] == 0 or count > 5:
                count = 0
                x, y = (testx, testy)
                maph[x][y], mapr[x][y] = (0, 0)
                if x == size-2:  # reached the far side: place the exit here
                    exitx, exity = (x, y)
                    break
            else:
                count = count+1
    return mr, mg, mb, maph, mapr, exitx, exity, mapt, maps
def movement(pressed_keys, posx, posy, rot, rot_v, maph, et, shoot, sstart):
    """Apply one frame of mouse-look and keyboard movement.

    A candidate position is committed only when the destination cell is
    open (maph == 0), so walls block the player. Space starts a shot
    when none is in flight and no shot timer is pending.

    Returns the updated (posx, posy, rot, rot_v, shoot).
    """
    x, y = (posx, posy)
    # Mouse-look: horizontal delta pans (clamped per frame), vertical tilts.
    p_mouse = pg.mouse.get_pos()
    rot, rot_v = rot - np.clip((p_mouse[0]-400)/200, -0.2, .2), rot_v - (p_mouse[1]-300)/400
    rot_v = np.clip(rot_v, -1, 1)
    # WASD / arrow keys: forward-back along the view, strafe perpendicular.
    if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
        x, y = (x + et*np.cos(rot), y + et*np.sin(rot))
    if pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
        x, y = (x - et*np.cos(rot), y - et*np.sin(rot))
    if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
        x, y = (x - et*np.sin(rot), y + et*np.cos(rot))
    if pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
        x, y = (x + et*np.sin(rot), y - et*np.cos(rot))
    if maph[int(x)][int(y)] == 0:  # commit the move only into open cells
        posx, posy = (x, y)
    # Fixed idiom: compare against None with `is`, not `==`.
    if not shoot and sstart is None and pressed_keys[pg.K_SPACE]:
        shoot = 1
    return posx, posy, rot, rot_v, shoot
@njit(fastmath=True)
def super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, size):
    """Ray-march every pixel of a width x height frame into pr/pg/pb.

    For each pixel a ray starts at the camera (posx, posy, posz), is
    stepped through the map (walls maph, mirrors mapr, spheres maps,
    textures mapt), then shaded against the sky light (lx, ly, lz) or
    the muzzle flash (sx, sy), and the resulting colour is written into
    the flat row-major buffers pr/pg/pb at idx = j*width + i.

    NOTE: inside this function `pg` is the green pixel buffer parameter,
    shadowing the module-level pygame alias — pygame must not be used here.
    NOTE(review): wall and mirror colours read mg for the blue channel and
    the `mb` parameter is never used — looks like a bug; confirm intent.
    """
    texture=[[ .95, .99, .97, .8], # brick wall
             [ .97, .95, .96, .85],
             [.8, .85, .8, .8],
             [ .93, .8, .98, .96],
             [ .99, .8, .97, .95],
             [.8, .85, .8, .8]]
    idx = 0
    for j in range(height): #vertical loop
        rot_j = rot_v + np.deg2rad(24 - j/mod)
        sinzo = inc*np.sin(rot_j)
        coszo = inc*np.sqrt(abs(np.cos(rot_j)))
        for i in range(width): #horizontal vision loop
            rot_i = rot + np.deg2rad(i/mod - 30)
            x, y, z = (posx, posy, posz)
            sin, cos, sinz = coszo*np.sin(rot_i), coszo*np.cos(rot_i), sinzo
            modr = 1   # accumulated attenuation from mirrors/distance
            cx, cy, c1r, c2r, c3r = 1, 1, 1, 1, 1
            shot, enem, mapv = 0, 0, 0
            dtp = np.random.uniform(0.002,0.01)  # jittered shot-glow radius
            # --- primary ray march ---
            while 1:
                if (mapv == 0 or (sinz > 0 and (z > mapv or (mapv==6 and (z>0.4 or z <0.2)) or(z > 0.57 and mapv > 1)))): ## LoDev DDA for optimization
                    # Skip empty space in one jump instead of small steps.
                    norm = np.sqrt(cos**2 + sin**2 + sinz**2)
                    rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
                    mapX, mapY = int(x), int(y)
                    deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
                    if (rayDirX < 0):
                        stepX, sideDistX = -1, (x - mapX) * deltaDistX
                    else:
                        stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
                    if (rayDirY < 0):
                        stepY, sideDistY = -1, (y - mapY) * deltaDistY
                    else:
                        stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
                    if (rayDirZ < 0):
                        sideDistZ = z*deltaDistZ;
                    else:
                        sideDistZ = (1-z)*deltaDistZ
                    while (1):
                        if (sideDistX < sideDistY):
                            sideDistX += deltaDistX; mapX += stepX
                            dist = sideDistX; side = 0
                            if mapX < 1 or mapX > size-2:
                                break
                        else:
                            sideDistY += deltaDistY; mapY += stepY
                            dist = sideDistY; side = 1
                            if mapY < 1 or mapY > size-2:
                                break
                        if (maph[mapX][mapY] != 0):
                            break
                    if (side):
                        dist = dist - deltaDistY
                    else:
                        dist = dist - deltaDistX
                    if (dist > sideDistZ):  # floor/ceiling is hit first
                        dist = sideDistZ
                    x = x + rayDirX*dist - cos/2
                    y = y + rayDirY*dist - sin/2
                    z = z + rayDirZ*dist - sinz/2
                    ## end of LoDev DDA
                x += cos; y += sin; z += sinz
                if (z > 1 or z < 0): # check ceiling and floor
                    break
                mapv = maph[int(x)][int(y)]
                # mapv codes (from mplayer overlay): 2/8 player, 3/9 enemy, >5 shot.
                if mapv > 1 and z < 0.57:
                    if mapv == 2 or mapv == 8:
                        # Player body: head sphere, cone chest, roller ball.
                        if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
                            break
                        if z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
                            break
                        if z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
                            break
                    if mapv == 3 or mapv == 9:
                        enem = 1
                        if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
                            break
                        if z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
                            break
                        if z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
                            break
                    if mapv > 5 and z < 0.4 and z > 0.2:
                        if ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2 < dtp):#0.01):
                            shot = 1
                            break
                if mapv > z and mapv < 2: # check walls
                    if maps[int(x)][int(y)]: # check spheres
                        if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
                            if (mapr[int(x)][int(y)]): # spherical mirror
                                if (modr == 1):
                                    cx, cy = int(x), int(y)  # remember first mirror cell for tinting
                                modr = modr*0.7
                                if (modr < 0.2):
                                    break
                                if (mapv - z <= abs(sinz) ): ## horizontal surface
                                    sinz = -sinz
                                else:
                                    # Reflect the ray about the sphere normal.
                                    nx = (x-int(x)-0.5)/0.5; ny = (y-int(y)-0.5)/0.5; nz =(z-int(z)-0.5)/0.5
                                    dot = 2*(cos*nx + sin*ny + sinz*nz)
                                    cos = (cos - nx*dot); sin = (sin - ny*dot); sinz = (sinz - nz*dot)
                                    x += cos; y += sin; z += sinz
                            else:
                                break
                    elif mapr[int(x)][int(y)]: # check reflections
                        if modr == 1:
                            cx, cy = int(x), int(y)
                        modr = modr*0.7
                        if modr < 0.2:
                            break
                        if abs(z-maph[int(x)][int(y)]) < abs(sinz):  # top face: flip vertical
                            sinz = -sinz
                        elif maph[int(x+cos)][int(y-sin)] == maph[int(x)][int(y)]:
                            cos = -cos
                        else:
                            sin = -sin
                    else:
                        break
            # --- shading of the hit point ---
            if z > 1: # ceiling
                deltaDistZ = (lz-z)*deltaDistZ
                x += deltaDistZ*rayDirX; y += deltaDistZ*rayDirY; z = lz
                dtol = np.sqrt((x-lx)**2+(y-ly)**2)
                if dtol < 50: #light source
                    shot = 1
                    c1, c2, c3 = 1, 1, 0.5
                else:
                    # Procedural sky: banded gradient around the sun angle.
                    angle = np.rad2deg(np.arctan((y-ly)/(x-lx)))/np.random.uniform(12,15)
                    sh = (0.8+ abs(angle - int(angle))/5)/(dtol/1000)
                    if sh > 1:
                        sh = 1
                    if int(angle)%2 == 1:
                        c1, c2, c3 = 0.8*(1-sh), 0.86*(1-sh/4), (1-sh/10)
                    else:
                        c1, c2, c3 = 0.8*(1-sh), 0.9*(1-sh/4), (1-sh/10)
                    if sx != -1:  # darken the sky while a shot is flying
                        c1, c2, c3 = 0.7*c1, 0.7*c2, 0.7*c3
            elif z < 0: # floor
                z = 0
                if int(x*2)%2 == int(y*2)%2:  # checkerboard pattern
                    c1, c2, c3 = .8,.8,.8
                else:
                    if int(x) == exitx and int(y) == exity: #exit
                        c1, c2, c3 = 0,0,.6
                    else:
                        c1, c2, c3 = .1,.1,.1
            elif mapv < 2: # walls
                c1, c2, c3 = mr[int(x)][int(y)], mg[int(x)][int(y)], mg[int(x)][int(y)]
                if mapt[int(x)][int(y)]: # textured walls
                    if y%1 < 0.05 or y%1 > 0.95:
                        ww = int((x*3)%1*4)
                    else:
                        ww = int((y*3)%1*4)
                    if x%1 < 0.95 and x%1 > 0.05 and y%1 < 0.95 and y%1 > 0.05:
                        zz = int(x*5%1*6)
                    else:
                        zz = int(z*5%1*6)
                    text = texture[zz][ww]
                    c1, c2, c3 = c1*text, c2*text, c3*text
                if mapv - z <= abs(sinz):
                    z = mapv  # landed on top of the wall
                elif not maps[int(x)][int(y)]:
                    # Snap back to the face that was crossed; side faces are dimmer.
                    if int(x-cos) != int(x):
                        x = max(int(x-cos), int(x))
                        modr = modr*0.80
                    else:
                        y = max(int(y-sin), int(y))
                        modr = modr*0.9
            else:
                # Character or shot surfaces.
                if shot:
                    sh = ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2)/0.012
                    c1, c2, c3 = 1, 0.6*sh+0.2 , 0.2*sh+0.1 # shot
                elif z> 0.45:
                    c1, c2, c3 = 0.6, 0.3, 0.3 # Head
                elif z > 0.3:
                    c1, c2, c3 = 0.3, 0.5, 0.5 # Chest
                else:
                    if enem:
                        c1, c2, c3 = 1, 0.2, 0.2 # Roller red
                    else:
                        c1, c2, c3 = 0.2, 0.2, 1 # Roller blue
            if modr <= 0.7 and not shot:
                # Tint by the first mirror cell the ray bounced off.
                c1r, c2r, c3r = mr[cx][cy], mg[cx][cy], mg[cx][cy]
            if not shot and z < 1:
                # Distance fog.
                dtp = np.sqrt((x-posx)**2+(y-posy)**2+(z-posz)**2)
                if dtp > 7:
                    modr = modr/np.log((dtp-6)/4+np.e)
            if z < 1: # shadows
                # Secondary ray toward the shot flash (if active) or the sky light.
                if sx != -1 and maph[int(sx)][int(sy)] > 1:
                    shot, c3 = 1, c3 * 0.9
                    dtol = np.sqrt((x-sx)**2+(y-sy)**2+(z-0.35)**2)
                    cos, sin, sinz = .01*(sx-x)/dtol, .01*(sy-y)/dtol, .01*(0.35-z)/dtol
                else:
                    dtol = np.sqrt((x-lx)**2+(y-ly)**2+(z-lz)**2)
                    cos, sin, sinz = .01*(lx-x)/dtol, .01*(ly-y)/dtol, .01*(lz-z)/dtol
                x += cos; y += sin; z += sinz
                mapv = maph[int(x)][int(y)]
                if z < mapv and mapv < 1 and not maps[int(x)][int(y)]:
                    modr = modr*0.39  # immediately inside a wall: hard shadow
                while modr > 0.45:
                    if (mapv == 0) or not shot and ((z > mapv) or (z > 0.57 and mapv > 1)): ## LoDev DDA for optimization
                        norm = np.sqrt(cos**2 + sin**2 + sinz**2)
                        rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
                        mapX, mapY = int(x), int(y)
                        deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
                        if (rayDirX < 0):
                            stepX, sideDistX = -1, (x - mapX) * deltaDistX
                        else:
                            stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
                        if (rayDirY < 0):
                            stepY, sideDistY = -1, (y - mapY) * deltaDistY
                        else:
                            stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
                        if (rayDirZ < 0):
                            sideDistZ = z*deltaDistZ;
                        else:
                            sideDistZ = (1-z)*deltaDistZ
                        while (1):
                            if (sideDistX < sideDistY):
                                sideDistX += deltaDistX; mapX += stepX
                                dist = sideDistX; side = 0
                                if mapX < 1 or mapX > size-2:
                                    break
                            else:
                                sideDistY += deltaDistY; mapY += stepY
                                dist = sideDistY; side = 1
                                if mapY < 1 or mapY > size-2:
                                    break
                            if (maph[mapX][mapY] != 0):
                                break
                        if (side):
                            dist = dist - deltaDistY
                        else:
                            dist = dist - deltaDistX
                        if (dist > sideDistZ):
                            dist = sideDistZ
                        x = x + rayDirX*dist - cos/2
                        y = y + rayDirY*dist - sin/2
                        z = z + rayDirZ*dist - sinz/2
                        ## end of LoDev DDA
                    x += cos; y += sin; z += sinz
                    mapv = maph[int(x)][int(y)]
                    if shot:
                        if mapv > 5 or (sinz > 0 and z > 0.35) or (sinz < 0 and z < 0.35):
                            break
                    elif z >1:
                        break
                    if z < 0.57 and mapv > 1:
                        # Soft shadows cast by player/enemy bodies.
                        if mapv == 3 or mapv == 9:
                            if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
                                modr = modr*0.67
                            elif z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
                                modr = modr*0.67
                            elif z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
                                modr = modr*0.67
                        elif mapv == 2 or mapv == 8:
                            if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
                                modr = modr*0.67
                            elif z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
                                modr = modr*0.67
                            elif z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
                                modr = modr*0.67
                    if mapv > 0 and z <= mapv and mapv < 2:
                        if maps[int(x)][int(y)]: # check spheres
                            if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
                                modr = modr*0.9
                        else:
                            modr = modr*0.9
            # Write the gamma-ish blended colour into the flat buffers.
            pr[idx] = modr*np.sqrt(c1*c1r)
            pg[idx] = modr*np.sqrt(c2*c2r)
            pb[idx] = modr*np.sqrt(c3*c3r)
            idx += 1
    return pr, pg, pb
def adjust_resol(width):
    """Derive rendering parameters for a given horizontal resolution.

    Returns (width, height, mod, inc, rr, gg, bb): height keeps a 4:3
    aspect ratio, mod is the per-pixel angle scale relative to the base
    64-pixel width, inc is the ray step size (smaller at higher
    resolutions), and rr/gg/bb are flat per-pixel colour buffers seeded
    with uniform noise.
    """
    height = int(0.75*width)   # 4:3 aspect ratio
    mod = width/64             # scale factor relative to the base 64-px width
    inc = 0.02/mod             # ray increment shrinks as resolution grows
    rr = np.random.uniform(0, 1, width * height)
    gg = np.random.uniform(0, 1, width * height)
    bb = np.random.uniform(0, 1, width * height)
    return width, height, mod, inc, rr, gg, bb
@njit(fastmath=True)
def agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock):
    """Advance the enemy AI and any in-flight shot by one tick.

    The enemy at (enx, eny) ray-checks line of sight to the player, moves
    toward its last believed sighting (seenx, seeny), and player/enemy/shot
    positions are stamped into the mplayer overlay grid, which is then
    summed with maph. enx == 0 means "no enemy alive"; sx == -1 means
    "no shot in flight". Returns the full updated state tuple.
    """
    if enx != 0:
        # Periodically (or when unlocked) re-test line of sight to the player.
        if not lock or np.random.uniform(0,1) > 0.99:
            dtp = np.sqrt((enx-posx)**2 + (eny-posy)**2)
            cos, sin = (posx-enx)/dtp, (posy-eny)/dtp
            x, y = enx, eny
            for i in range(300):  # march the sight ray in 0.04 steps
                x += 0.04*cos; y += 0.04*sin
                if maph[int(x)][int(y)] != 0:
                    lock = 0  # wall in the way: lose the player
                    break
                if(int(x) == int(posx) and int(y) == int(posy)):
                    seenx, seeny = posx, posy
                    lock = 1  # player spotted: track the real position
                    break
        # Reached the believed sighting: pick a new goal.
        if int(enx) == int(seenx) and int(eny) == int(seeny):
            if not lock:
                if shoot:
                    seenx, seeny = np.random.uniform(enx, posx), np.random.uniform(eny, posy)
                else:
                    seenx, seeny = np.random.normal(enx, 2), np.random.normal(eny, 2)
            else:
                seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)
        # Noisy step toward the goal; on collision try a perpendicular step.
        dtp = np.sqrt((enx-seenx)**2 + (eny-seeny)**2)
        cos, sin = (seenx-enx)/dtp, (seeny-eny)/dtp
        x, y = enx + et*(cos+np.random.normal(0,.5)), eny + et*(sin+np.random.normal(0,.5))
        if maph[int(x)][int(y)] == 0:
            enx, eny = x, y
        else:
            if np.random.uniform(0,1) > 0.5:
                x, y = enx - et*(sin+np.random.normal(0,.5)), eny + et*(cos+np.random.normal(0,.5))
            else:
                x, y = enx + et*(sin+np.random.normal(0,.5)), eny - et*(cos+np.random.normal(0,.5))
            if maph[int(x)][int(y)] == 0:
                enx, eny = x, y
    else:
        # No enemy: keep the "sighting" wandering so respawn logic has a target.
        seenx, seeny = enx+np.random.normal(0,3), eny+np.random.normal(0,3)
        lock = 0
    # Stamp agents into the overlay grid (3 = enemy, 2 = player, +6 = shot).
    mplayer[int(enx)][int(eny)] = 3
    mplayer[int(posx)][int(posy)] = 2
    if shoot:
        if sx == -1:  # new shot: spawn slightly ahead of the player
            sdir = rot+np.random.uniform(-.1,.1)
            sx, sy = posx + .5*np.cos(sdir), posy + .5*np.sin(sdir)
        sx, sy = sx + 5*et*np.cos(sdir), sy + 5*et*np.sin(sdir)
        if enx != 0 and (sx - enx)**2 + (sy - eny)**2 < 0.02:
            # Direct hit: kill the enemy and clear the shot.
            shoot, sx, sy, enx, eny, seenx, seeny = 0, -1, -1, 0, 0, 0, 0
        if maph[int(sx)][int(sy)] != 0:
            shoot, sx, sy = 0, -1, -1  # shot absorbed by a wall
        else:
            mplayer[int(sx)][int(sy)] += 6
    mplayer = maph + mplayer
    return(enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock)
# Run the game only when executed as a script, not when imported.
if __name__ == '__main__':
    main()  # entry point defined earlier in this file
| RayTracingMazeEnem.py | 31,953 | size of the map pause new game toggle auto resolution toggle auto resolution manually change resolution fpss = int(clock.get_fps())pg.time.get_ticks()/100000auto adjust render resolution player's movement brick wallvertical loop horizontal vision loop LoDev DDA for optimization end of LoDev DDA check ceiling and floor0.01): check walls check spheres spherical mirror horizontal surface check reflections ceilinglight source floorexit walls textured walls shot Head Chest Roller red Roller blue shadows LoDev DDA for optimization end of LoDev DDA check spheres print('Resolution: ', width, height) | 612 | en | 0.57328 |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Analog to digital converter example.
# Will loop forever printing ADC channel 1 raw and mV values every second.
# NOTE the ADC can only read voltages in the range of ~900mV to 1800mV!
import time
import board
import busio
import adafruit_lis3dh
# Uncomment if using SPI
# import digitalio
# Hardware I2C setup. Use the CircuitPlayground built-in accelerometer if available;
# otherwise check I2C pins.
if hasattr(board, "ACCELEROMETER_SCL"):
    # Board with a built-in accelerometer on its own I2C pins
    # (uses the alternate I2C address 0x19).
    i2c = busio.I2C(board.ACCELEROMETER_SCL, board.ACCELEROMETER_SDA)
    lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19)
else:
    # External breakout on the standard I2C pins (library default address).
    i2c = busio.I2C(board.SCL, board.SDA)
    lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c)
# Hardware SPI setup:
# spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
# cs = digitalio.DigitalInOut(board.D5) # Set to correct CS pin!
# lis3dh = adafruit_lis3dh.LIS3DH_SPI(spi, cs)
# PyGamer I2C Setup:
# i2c = busio.I2C(board.SCL, board.SDA)
# lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19)
# Loop forever printing ADC readings.
while True:
    # Read raw ADC value. Specify which ADC to read: 1, 2, or 3.
    adc1_raw = lis3dh.read_adc_raw(1)
    # Or read the ADC value in millivolts:
    adc1_mV = lis3dh.read_adc_mV(1)
    print("ADC 1 = {} ({} mV)".format(adc1_raw, adc1_mV))
    time.sleep(1)
| Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/lis3dh_adc.py | 1,427 | SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries SPDX-License-Identifier: MIT Analog to digital converter example. Will loop forever printing ADC channel 1 raw and mV values every second. NOTE the ADC can only read voltages in the range of ~900mV to 1800mV! Uncomment if using SPI import digitalio Hardware I2C setup. Use the CircuitPlayground built-in accelerometer if available; otherwise check I2C pins. Hardware SPI setup: spi = busio.SPI(board.SCK, board.MOSI, board.MISO) cs = digitalio.DigitalInOut(board.D5) Set to correct CS pin! lis3dh = adafruit_lis3dh.LIS3DH_SPI(spi, cs) PyGamer I2C Setup: i2c = busio.I2C(board.SCL, board.SDA) lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19) Loop forever printing ADC readings. Read raw ADC value. Specify which ADC to read: 1, 2, or 3. Or read the ADC value in millivolts: | 842 | en | 0.506 |
import datetime
from datetime import datetime, timedelta
from time import sleep
from app.search import add_to_index, delete_index, create_index, query_index
from app import db
from app.models import Post, User
from tests.BaseDbTest import BaseDbTest
class SearchTest(BaseDbTest):
    """Integration tests for the app.search index helpers.

    A scratch index is created before and deleted after every test.
    """

    index_name = "test_index"  # name of the throw-away search index

    def setUp(self):
        super(SearchTest, self).setUp()
        create_index(SearchTest.index_name)

    def tearDown(self):
        super(SearchTest, self).tearDown()
        delete_index(SearchTest.index_name)

    def test_index_posts(self):
        """Indexed posts are found by body keyword via query_index."""
        # create two users
        u1 = User(username='john', email='john@example.com')
        u2 = User(username='susan', email='susan@example.com')
        db.session.add_all([u1, u2])
        # create four posts
        now = datetime.utcnow()
        p1 = Post(body="post post1 from john", author=u1,
                  timestamp=now + timedelta(seconds=1))
        p2 = Post(body="post post2 from susan", author=u2,
                  timestamp=now + timedelta(seconds=4))
        p3 = Post(body="post post3 from john", author=u1,
                  timestamp=now + timedelta(seconds=3))
        p4 = Post(body="post post4 from john", author=u1,
                  timestamp=now + timedelta(seconds=2))
        db.session.add_all([p1, p2, p3, p4])
        db.session.commit()
        add_to_index(SearchTest.index_name, p1)
        add_to_index(SearchTest.index_name, p2)
        add_to_index(SearchTest.index_name, p3)
        add_to_index(SearchTest.index_name, p4)
        sleep(1)  # give the search backend time to make the docs searchable
        # "post1" matches exactly one post...
        ids, total = query_index(SearchTest.index_name, "post1", 1, 20)
        self.assertEqual(1, total)
        self.assertEqual(p1.id, ids[0])
        # ...while "post" matches all four.
        ids, total = query_index(SearchTest.index_name, "post", 1, 20)
        self.assertEqual(4, total)
# ---------------------------------------------------------------------
# CPE check
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
# NOC modules
from noc.services.discovery.jobs.base import DiscoveryCheck
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.profile import Profile
class CPECheck(DiscoveryCheck):
    """
    CPE check: synchronize CPEs reported by a controller with
    ManagedObject records (create missing ones, update changed ones).
    @todo: Remove stale CPE
    """

    name = "cpe"
    required_script = "get_cpe"
    required_capabilities = ["CPE | Controller"]

    def handler(self):
        """Fetch CPEs from the controller and create/update ManagedObjects."""
        self.logger.info("Checking CPEs")
        now = datetime.datetime.now()
        result = self.object.scripts.get_cpe()
        for cpe in result:
            # Only active CPEs are synchronized; others are skipped.
            if cpe["status"] != "active":
                self.logger.debug(
                    "[%s|%s] CPE status is '%s'. Skipping",
                    cpe["id"],
                    cpe["global_id"],
                    cpe["status"],
                )
                continue
            mo = self.find_cpe(cpe["global_id"])
            if mo:
                # Known CPE: update only the fields that changed.
                changes = self.update_if_changed(
                    mo,
                    {
                        "controller": self.object,
                        "local_cpe_id": cpe["id"],
                        "global_cpe_id": cpe["global_id"],
                        "address": cpe["ip"],
                        "last_seen": now,
                    },
                )
                if changes:
                    self.logger.info(
                        "[%s|%s] Changed: %s",
                        cpe["id"],
                        cpe["global_id"],
                        ", ".join("%s='%s'" % c for c in changes),
                    )
            else:
                # New CPE: derive a unique name, falling back to the global id
                # when the reported name is taken.
                name = cpe.get("name") or "cpe-%s" % cpe["global_id"]
                if ManagedObject.objects.filter(name=name).exists():
                    name = "cpe-%s" % cpe["global_id"]
                self.logger.info("[%s|%s] Created CPE %s", cpe["id"], cpe["global_id"], name)
                # Profiles and domain are inherited from the controller unless
                # the controller's object profile overrides them for CPEs.
                mo = ManagedObject(
                    name=name,
                    pool=self.object.pool,
                    profile=Profile.get_by_id(Profile.get_generic_profile_id()),
                    object_profile=self.object.object_profile.cpe_profile
                    or self.object.object_profile,
                    administrative_domain=self.object.administrative_domain,
                    scheme=self.object.scheme,
                    segment=self.object.segment,
                    auth_profile=self.object.object_profile.cpe_auth_profile
                    or self.object.auth_profile,
                    address=cpe.get("ip") or "0.0.0.0",
                    controller=self.object,
                    last_seen=now,
                    local_cpe_id=cpe["id"],
                    global_cpe_id=cpe["global_id"],
                )
                mo.save()

    @classmethod
    def find_cpe(cls, global_id):
        """Return the ManagedObject with this global CPE id, or None."""
        try:
            return ManagedObject.objects.get(global_cpe_id=global_id)
        except ManagedObject.DoesNotExist:
            return None
| services/discovery/jobs/box/cpe.py | 3,285 | CPE check
@todo: Remove stale CPE
--------------------------------------------------------------------- CPE check --------------------------------------------------------------------- Copyright (C) 2007-2019 The NOC Project See LICENSE for details --------------------------------------------------------------------- Python modules NOC modules | 346 | en | 0.128131 |
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
import asyncio
from collections import OrderedDict
import logging
import unittest
from unittest.mock import patch, Mock
from datetime import timedelta
import pytest
import homeassistant.core as ha
import homeassistant.loader as loader
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components import group
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, MockPlatform, MockModule, mock_coro,
async_fire_time_changed, MockEntity, MockConfigEntry,
mock_entity_platform, mock_integration)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
class TestHelpersEntityComponent(unittest.TestCase):
    """Test homeassistant.helpers.entity_component module."""

    def setUp(self):  # pylint: disable=invalid-name
        """Initialize a test Home Assistant instance."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Clean up the test Home Assistant instance."""
        self.hass.stop()

    def test_setting_up_group(self):
        """Set up the setting of a group."""
        setup_component(self.hass, 'group', {'group': {}})
        component = EntityComponent(_LOGGER, DOMAIN, self.hass,
                                    group_name='everyone')

        # No group after setup
        assert len(self.hass.states.entity_ids()) == 0

        component.add_entities([MockEntity()])
        self.hass.block_till_done()

        # group exists
        assert len(self.hass.states.entity_ids()) == 2
        assert self.hass.states.entity_ids('group') == ['group.everyone']

        # NOTE: local `group` shadows the imported `group` module below.
        group = self.hass.states.get('group.everyone')
        assert group.attributes.get('entity_id') == \
            ('test_domain.unnamed_device',)

        # group extended
        component.add_entities([MockEntity(name='goodbye')])
        self.hass.block_till_done()

        assert len(self.hass.states.entity_ids()) == 3
        group = self.hass.states.get('group.everyone')

        # Ordered in order of added to the group
        assert group.attributes.get('entity_id') == \
            ('test_domain.goodbye', 'test_domain.unnamed_device')

    def test_setup_loads_platforms(self):
        """Test the loading of the platforms."""
        component_setup = Mock(return_value=True)
        platform_setup = Mock(return_value=None)

        mock_integration(self.hass,
                         MockModule('test_component', setup=component_setup))
        # mock the dependencies
        mock_integration(self.hass,
                         MockModule('mod2', dependencies=['test_component']))
        mock_entity_platform(self.hass, 'test_domain.mod2',
                             MockPlatform(platform_setup))

        component = EntityComponent(_LOGGER, DOMAIN, self.hass)

        assert not component_setup.called
        assert not platform_setup.called

        component.setup({
            DOMAIN: {
                'platform': 'mod2',
            }
        })

        self.hass.block_till_done()
        # Setting up the platform must also set up its parent component.
        assert component_setup.called
        assert platform_setup.called

    def test_setup_recovers_when_setup_raises(self):
        """Test the setup if exceptions are happening."""
        platform1_setup = Mock(side_effect=Exception('Broken'))
        platform2_setup = Mock(return_value=None)

        mock_entity_platform(self.hass, 'test_domain.mod1',
                             MockPlatform(platform1_setup))
        mock_entity_platform(self.hass, 'test_domain.mod2',
                             MockPlatform(platform2_setup))

        component = EntityComponent(_LOGGER, DOMAIN, self.hass)

        assert not platform1_setup.called
        assert not platform2_setup.called

        # A raising platform (mod1) and a missing one (non_exist) must not
        # prevent the healthy platform (mod2) from being set up.
        component.setup(OrderedDict([
            (DOMAIN, {'platform': 'mod1'}),
            ("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
            ("{} 3".format(DOMAIN), {'platform': 'mod2'}),
        ]))

        self.hass.block_till_done()
        assert platform1_setup.called
        assert platform2_setup.called

    @patch('homeassistant.helpers.entity_component.EntityComponent'
           '._async_setup_platform', return_value=mock_coro())
    @patch('homeassistant.setup.async_setup_component',
           return_value=mock_coro(True))
    def test_setup_does_discovery(self, mock_setup_component, mock_setup):
        """Test setup for discovery."""
        component = EntityComponent(_LOGGER, DOMAIN, self.hass)
        component.setup({})

        discovery.load_platform(self.hass, DOMAIN, 'platform_test',
                                {'msg': 'discovery_info'}, {DOMAIN: {}})

        self.hass.block_till_done()

        assert mock_setup.called
        assert ('platform_test', {}, {'msg': 'discovery_info'}) == \
            mock_setup.call_args[0]

    @patch('homeassistant.helpers.entity_platform.'
           'async_track_time_interval')
    def test_set_scan_interval_via_config(self, mock_track):
        """Test the setting of the scan interval via configuration."""
        def platform_setup(hass, config, add_entities, discovery_info=None):
            """Test the platform setup."""
            add_entities([MockEntity(should_poll=True)])

        mock_entity_platform(self.hass, 'test_domain.platform',
                             MockPlatform(platform_setup))

        component = EntityComponent(_LOGGER, DOMAIN, self.hass)

        component.setup({
            DOMAIN: {
                'platform': 'platform',
                'scan_interval': timedelta(seconds=30),
            }
        })

        self.hass.block_till_done()
        assert mock_track.called
        # Third positional arg of async_track_time_interval is the interval.
        assert timedelta(seconds=30) == mock_track.call_args[0][2]

    def test_set_entity_namespace_via_config(self):
        """Test setting an entity namespace."""
        def platform_setup(hass, config, add_entities, discovery_info=None):
            """Test the platform setup."""
            add_entities([
                MockEntity(name='beer'),
                MockEntity(name=None),
            ])

        platform = MockPlatform(platform_setup)

        mock_entity_platform(self.hass, 'test_domain.platform', platform)

        component = EntityComponent(_LOGGER, DOMAIN, self.hass)

        component.setup({
            DOMAIN: {
                'platform': 'platform',
                'entity_namespace': 'yummy'
            }
        })

        self.hass.block_till_done()

        # The namespace is prefixed to every generated entity id.
        assert sorted(self.hass.states.entity_ids()) == \
            ['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']
@asyncio.coroutine
def test_extract_from_service_available_device(hass):
    """Test the extraction of entity from service and device is available.

    Unavailable entities are excluded both with and without an explicit
    entity_id in the service data.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    yield from component.async_add_entities([
        MockEntity(name='test_1'),
        MockEntity(name='test_2', available=False),
        MockEntity(name='test_3'),
        MockEntity(name='test_4', available=False),
    ])

    call_1 = ha.ServiceCall('test', 'service')

    assert ['test_domain.test_1', 'test_domain.test_3'] == \
        sorted(ent.entity_id for ent in
               (yield from component.async_extract_from_service(call_1)))

    call_2 = ha.ServiceCall('test', 'service', data={
        'entity_id': ['test_domain.test_3', 'test_domain.test_4'],
    })

    assert ['test_domain.test_3'] == \
        sorted(ent.entity_id for ent in
               (yield from component.async_extract_from_service(call_2)))
@asyncio.coroutine
def test_platform_not_ready(hass):
    """Test that we retry when platform not ready.

    Retries are scheduled 30s after the first failure and 30s more after
    the second; the platform is registered only after a successful setup.
    """
    platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady,
                                        None])
    loader.set_component(hass, 'mod1',
                         MockModule('mod1'))
    loader.set_component(hass, 'mod1.test_domain',
                         MockPlatform(platform1_setup))

    component = EntityComponent(_LOGGER, DOMAIN, hass)

    yield from component.async_setup({
        DOMAIN: {
            'platform': 'mod1'
        }
    })

    assert len(platform1_setup.mock_calls) == 1
    assert 'test_domain.mod1' not in hass.config.components

    utcnow = dt_util.utcnow()

    with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
        # Should not trigger attempt 2
        async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
        yield from hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 1

        # Should trigger attempt 2
        async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
        yield from hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 2
        assert 'test_domain.mod1' not in hass.config.components

        # This should not trigger attempt 3
        async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
        yield from hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 2

        # Trigger attempt 3, which succeeds
        async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
        yield from hass.async_block_till_done()
        assert len(platform1_setup.mock_calls) == 3
        assert 'test_domain.mod1' in hass.config.components
@asyncio.coroutine
def test_extract_from_service_returns_all_if_no_entity_id(hass):
    """Test the extraction of everything from service.

    A service call without entity_id targets every entity of the component.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    yield from component.async_add_entities([
        MockEntity(name='test_1'),
        MockEntity(name='test_2'),
    ])

    call = ha.ServiceCall('test', 'service')

    assert ['test_domain.test_1', 'test_domain.test_2'] == \
        sorted(ent.entity_id for ent in
               (yield from component.async_extract_from_service(call)))
@asyncio.coroutine
def test_extract_from_service_filter_out_non_existing_entities(hass):
    """Test the extraction of non existing entities from service.

    Unknown entity ids in the call data are silently dropped.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    yield from component.async_add_entities([
        MockEntity(name='test_1'),
        MockEntity(name='test_2'),
    ])

    call = ha.ServiceCall('test', 'service', {
        'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
    })

    assert ['test_domain.test_2'] == \
        [ent.entity_id for ent
         in (yield from component.async_extract_from_service(call))]
@asyncio.coroutine
def test_extract_from_service_no_group_expand(hass):
    """Test not expanding a group.

    With expand_group=False the group entity itself is returned instead
    of its members.
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    test_group = yield from group.Group.async_create_group(
        hass, 'test_group', ['light.Ceiling', 'light.Kitchen'])
    yield from component.async_add_entities([test_group])

    call = ha.ServiceCall('test', 'service', {
        'entity_id': ['group.test_group']
    })

    extracted = yield from component.async_extract_from_service(
        call, expand_group=False)
    assert extracted == [test_group]
@asyncio.coroutine
def test_setup_dependencies_platform(hass):
    """Test we setup the dependencies of a platform.

    We're explicitly testing that we process dependencies even if a component
    with the same name has already been loaded.
    """
    loader.set_component(hass, 'test_component',
                        MockModule('test_component',
                                   dependencies=['test_component2']))
    loader.set_component(hass, 'test_component2',
                        MockModule('test_component2'))
    loader.set_component(hass, 'test_component.test_domain', MockPlatform())

    component = EntityComponent(_LOGGER, DOMAIN, hass)

    yield from component.async_setup({
        DOMAIN: {
            'platform': 'test_component',
        }
    })

    # Platform, its component, and the transitive dependency all load.
    assert 'test_component' in hass.config.components
    assert 'test_component2' in hass.config.components
    assert 'test_domain.test_component' in hass.config.components
async def test_setup_entry(hass):
    """Test setup entry calls async_setup_entry on platform.

    The platform's scan_interval must be propagated to the entity platform
    created for the config entry.
    """
    mock_setup_entry = Mock(return_value=mock_coro(True))
    mock_entity_platform(
        hass, 'test_domain.entry_domain',
        MockPlatform(async_setup_entry=mock_setup_entry,
                     scan_interval=timedelta(seconds=5)))

    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain='entry_domain')

    assert await component.async_setup_entry(entry)
    assert len(mock_setup_entry.mock_calls) == 1
    p_hass, p_entry, p_add_entities = mock_setup_entry.mock_calls[0][1]
    assert p_hass is hass
    assert p_entry is entry

    assert component._platforms[entry.entry_id].scan_interval == \
        timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
    """Test setup entry fails if platform doesn't exist."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain='non_existing')

    # No platform registered for 'non_existing' -> setup reports failure.
    assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
    """Test we don't allow setting up a config entry twice.

    A second async_setup_entry for the same entry raises ValueError.
    """
    mock_setup_entry = Mock(return_value=mock_coro(True))
    mock_entity_platform(
        hass, 'test_domain.entry_domain',
        MockPlatform(async_setup_entry=mock_setup_entry))

    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain='entry_domain')

    assert await component.async_setup_entry(entry)

    with pytest.raises(ValueError):
        await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
    """Test unloading an entry removes all entities."""
    mock_setup_entry = Mock(return_value=mock_coro(True))
    mock_entity_platform(
        hass, 'test_domain.entry_domain',
        MockPlatform(async_setup_entry=mock_setup_entry))

    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain='entry_domain')

    assert await component.async_setup_entry(entry)
    assert len(mock_setup_entry.mock_calls) == 1

    # Third positional arg passed to async_setup_entry is add_entities.
    add_entities = mock_setup_entry.mock_calls[0][1][2]
    add_entities([MockEntity()])
    await hass.async_block_till_done()

    assert len(hass.states.async_entity_ids()) == 1

    assert await component.async_unload_entry(entry)
    assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
    """Test unloading an entry that was never set up raises ValueError."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entry = MockConfigEntry(domain='entry_domain')

    with pytest.raises(ValueError):
        await component.async_unload_entry(entry)
async def test_update_entity(hass):
    """Test that we can update an entity with the helper.

    async_update_entity must force a refresh (force_refresh=True).
    """
    component = EntityComponent(_LOGGER, DOMAIN, hass)
    entity = MockEntity()
    entity.async_update_ha_state = Mock(return_value=mock_coro())
    await component.async_add_entities([entity])

    # Called as part of async_add_entities
    assert len(entity.async_update_ha_state.mock_calls) == 1

    await hass.helpers.entity_component.async_update_entity(entity.entity_id)

    assert len(entity.async_update_ha_state.mock_calls) == 2
    # The helper requests a forced refresh (first positional arg True).
    assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
    """Test race condition on setting service."""
    caught_exception = False

    def async_loop_exception_handler(_loop, _context) -> None:
        """Record that the event loop reported an exception."""
        nonlocal caught_exception
        caught_exception = True

    hass.loop.set_exception_handler(async_loop_exception_handler)

    await async_setup_component(hass, 'group', {})
    comp = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')

    # Schedule two concurrent add_entities calls to provoke the race.
    for _ in range(2):
        hass.async_create_task(comp.async_add_entities([MockEntity()]))
    await hass.async_block_till_done()

    assert not caught_exception
async def test_extract_all_omit_entity_id(hass, caplog):
    """Test extract all with None and *."""
    comp = EntityComponent(_LOGGER, DOMAIN, hass)
    await comp.async_add_entities([
        MockEntity(name='test_1'),
        MockEntity(name='test_2'),
    ])

    service_call = ha.ServiceCall('test', 'service')
    extracted = await comp.async_extract_from_service(service_call)
    assert sorted(ent.entity_id for ent in extracted) == \
        ['test_domain.test_1', 'test_domain.test_2']
    # Omitting entity_id targets every entity and logs a deprecation warning.
    assert ('Not passing an entity ID to a service to target all entities is '
            'deprecated') in caplog.text
async def test_extract_all_use_match_all(hass, caplog):
    """Test extract all with None and *."""
    comp = EntityComponent(_LOGGER, DOMAIN, hass)
    await comp.async_add_entities([
        MockEntity(name='test_1'),
        MockEntity(name='test_2'),
    ])

    service_call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
    extracted = await comp.async_extract_from_service(service_call)
    assert sorted(ent.entity_id for ent in extracted) == \
        ['test_domain.test_1', 'test_domain.test_2']
    # Explicitly passing 'all' targets everything without the deprecation log.
    assert ('Not passing an entity ID to a service to target all entities is '
            'deprecated') not in caplog.text
| tests/helpers/test_entity_component.py | 17,404 | Test homeassistant.helpers.entity_component module.
Handle all exception inside the core loop.
Test the platform setup.
Test the platform setup.
Initialize a test Home Assistant instance.
Clean up the test Home Assistant instance.
Test the extraction of entity from service and device is available.
Test the extraction of non existing entities from service.
Test not expanding a group.
Test the extraction of everything from service.
Test that we retry when platform not ready.
Test setting an entity namespace.
Test the setting of the scan interval via configuration.
Set up the setting of a group.
Test we setup the dependencies of a platform.
We're explictely testing that we process dependencies even if a component
with the same name has already been loaded.
Test setup for discovery.
Test the loading of the platforms.
Test the setup if exceptions are happening.
The tests for the Entity component helper.
pylint: disable=protected-access pylint: disable=invalid-name pylint: disable=invalid-name No group after setup group exists group extended Ordered in order of added to the group mock the dependencies Should not trigger attempt 2 Should trigger attempt 2 This should not trigger attempt 3 Trigger attempt 3, which succeeds Called as part of async_add_entities | 1,274 | en | 0.838819 |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GetBillingAddressResponse:
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Maps each attribute name to its swagger-declared type.
        self.swaggerTypes = dict(
            result='GetBillingAddressResult',
            status='str',
            error_message='str',
            composedOn='long',
        )

        # Every payload field starts out unset.
        for attr in ('result', 'status', 'error_message', 'composedOn'):
            setattr(self, attr, None)
| groupdocs/models/GetBillingAddressResponse.py | 1,169 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
!/usr/bin/env python GetBillingAddressResult str str long | 751 | en | 0.81734 |
# Generated by Django 3.0.4 on 2020-03-29 13:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: links DiseaseStats rows to a Country
    # and enforces one stats row per (season, country, date) combination.

    dependencies = [
        ('analyzer', '0004_auto_20200328_1750'),
    ]

    operations = [
        migrations.AddField(
            model_name='diseasestats',
            name='country',
            # default=0 is used only to backfill existing rows; PROTECT
            # prevents deleting a Country that still has stats attached.
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='analyzer.Country'),
            # The default is for the one-time backfill and is not kept
            # on the model definition.
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name='diseasestats',
            unique_together={('disease_season', 'country', 'stats_date')},
        ),
    ]
| analyzer/migrations/0005_auto_20200329_1308.py | 685 | Generated by Django 3.0.4 on 2020-03-29 13:08 | 45 | en | 0.628438 |
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import random
from NiaPy.algorithms.basic import MultiStrategyDifferentialEvolution
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere
# Run multi-strategy Differential Evolution for 5 independent runs.
for run in range(5):
    # Fresh task per run: 10-dim Sphere, budget of 1000 evaluations.
    problem = StoppingTask(D=10, nFES=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    solver = MultiStrategyDifferentialEvolution(NP=50, F=0.5, CR=0.9)
    solution = solver.run(task=problem)
    print('%s -> %s' % (solution[0].x, solution[1]))
| examples/run_msde.py | 676 | encoding=utf8 This is temporary fix to import module from parent folder It will be removed when package is published on PyPI End of fixwe will run Differential Evolution for 5 independent runs | 192 | en | 0.857897 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.