hexsha
stringlengths
40
40
size
int64
1
1.03M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
239
max_stars_repo_name
stringlengths
5
130
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
239
max_issues_repo_name
stringlengths
5
130
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
239
max_forks_repo_name
stringlengths
5
130
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
1
1.03M
avg_line_length
float64
1
958k
max_line_length
int64
1
1.03M
alphanum_fraction
float64
0
1
ace7edbe2ea74279c4cde740fb2b933f3a565449
4,423
py
Python
model/layers/msr_blocks.py
Shank2358/NPMMR-Det
414d148ff2ba5edbe870a8dafb6336845fb9ffbb
[ "Apache-2.0" ]
27
2021-01-09T07:35:45.000Z
2022-02-06T03:18:54.000Z
model/layers/msr_blocks.py
Shank2358/NPMMR-Det
414d148ff2ba5edbe870a8dafb6336845fb9ffbb
[ "Apache-2.0" ]
4
2021-07-20T07:16:03.000Z
2022-03-29T14:22:59.000Z
model/layers/msr_blocks.py
Shank2358/NPMMR-Det
414d148ff2ba5edbe870a8dafb6336845fb9ffbb
[ "Apache-2.0" ]
1
2021-12-26T09:12:08.000Z
2021-12-26T09:12:08.000Z
import torch
import torch.nn as nn
from dropblock import DropBlock2D, LinearScheduler
from ..layers.convolutions import Convolutional


class MSR_Convset_L(nn.Module):
    """Large multi-scale receptive-field conv set.

    Expands channels 3x3 (C -> 2C), drops blocks, squeezes back to C with a
    1x1, then runs three cascaded dilated 3x3 branches (dilation 2/4/6) whose
    outputs are summed progressively and concatenated with the squeezed input
    (4C total) before a final 1x1 fusion back to C channels.
    """

    def __init__(self, filters_in):
        super(MSR_Convset_L, self).__init__()
        # 3x3 expand: C -> 2C
        self.__dw0 = Convolutional(filters_in=filters_in, filters_out=filters_in*2,
                                   kernel_size=3, stride=1, pad=1, norm="bn", activate="leaky")
        # 1x1 squeeze: 2C -> C
        self.__pw0 = Convolutional(filters_in=filters_in*2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        # Dilated branches; pad == dila keeps spatial size constant for 3x3 kernels.
        self.__dw1 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=2, dila=2, norm="bn", activate="leaky")
        self.__dw2 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=4, dila=4, norm="bn", activate="leaky")
        self.__dw3 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=6, dila=6, norm="bn", activate="leaky")
        # 1x1 fusion of the 4-way concat: 4C -> C
        self.__pw1 = Convolutional(filters_in=filters_in*4, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="Mish")
        # DropBlock with drop probability linearly ramped from 0 to 0.1 over 5 steps.
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        """Input (N, C, H, W) -> output (N, C, H, W)."""
        dw0 = self.__dw0(x)
        dw0 = self.__drop(dw0)
        pw0 = self.__pw0(dw0)
        dw1 = self.__dw1(pw0)
        # Cascaded residual sums: each wider-dilation branch adds the previous one.
        dw2 = self.__dw2(pw0)+dw1
        dw3 = self.__dw3(pw0)+dw2
        cat = torch.cat((pw0, dw1, dw2, dw3),1)
        pw1 = self.__pw1(cat)
        return pw1


class MSR_Convset_M(nn.Module):
    """Medium multi-scale conv set: two dilated branches (dilation 1 and 2)."""

    def __init__(self, filters_in):
        super(MSR_Convset_M, self).__init__()
        self.__dw0 = Convolutional(filters_in=filters_in, filters_out=filters_in*2,
                                   kernel_size=3, stride=1, pad=1, norm="bn", activate="leaky")
        self.__pw0 = Convolutional(filters_in=filters_in*2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.__dw1 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=1, dila=1, norm="bn", activate="leaky")
        self.__dw2 = Convolutional(filters_in=filters_in, filters_out=filters_in,
                                   kernel_size=3, stride=1, pad=2, dila=2, norm="bn", activate="leaky")
        # Fuses the 2-way concat (2C -> C).
        self.__pw1 = Convolutional(filters_in=filters_in*2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="Mish")
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        """Input (N, C, H, W) -> output (N, C, H, W)."""
        dw0 = self.__dw0(x)
        dw0 = self.__drop(dw0)
        pw0 = self.__pw0(dw0)
        dw1 = self.__dw1(pw0)
        dw2 = self.__dw2(pw0)+dw1
        cat = torch.cat((dw1, dw2),1)
        pw1 = self.__pw1(cat)
        return pw1


class MSR_Convset_S(nn.Module):
    """Small conv set: a single expand/squeeze pair after the shared stem."""

    def __init__(self, filters_in):
        super(MSR_Convset_S, self).__init__()
        self.__dw0 = Convolutional(filters_in=filters_in, filters_out=filters_in*2,
                                   kernel_size=3, stride=1, pad=1, norm="bn", activate="leaky")
        self.__pw0 = Convolutional(filters_in=filters_in*2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.__dw1 = Convolutional(filters_in=filters_in, filters_out=filters_in*2,
                                   kernel_size=3, stride=1, pad=1, dila=1, norm="bn", activate="leaky")
        self.__pw1 = Convolutional(filters_in=filters_in*2, filters_out=filters_in,
                                   kernel_size=1, stride=1, pad=0, norm="bn", activate="leaky")
        self.__drop = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0.1),
                                      start_value=0., stop_value=0.1, nr_steps=5)

    def forward(self, x):
        """Input (N, C, H, W) -> output (N, C, H, W)."""
        dw0 = self.__dw0(x)
        dw0 = self.__drop(dw0)
        pw0 = self.__pw0(dw0)
        dw1 = self.__dw1(pw0)
        pw1 = self.__pw1(dw1)
        return pw1
55.2875
123
0.595297
ace7ee9b76dafb8e1cfec25b96e093eaeac8e593
5,345
py
Python
nuage_tempest_plugin/tests/api/baremetal/baremetal_topology.py
nuagenetworks/nuage-tempest-plugin
ac1bfb0709c7bbaf04017af3050fb3ed1ad1324a
[ "Apache-1.1" ]
1
2021-01-03T01:47:51.000Z
2021-01-03T01:47:51.000Z
nuage_tempest_plugin/tests/api/baremetal/baremetal_topology.py
nuagenetworks/nuage-tempest-plugin
ac1bfb0709c7bbaf04017af3050fb3ed1ad1324a
[ "Apache-1.1" ]
null
null
null
nuage_tempest_plugin/tests/api/baremetal/baremetal_topology.py
nuagenetworks/nuage-tempest-plugin
ac1bfb0709c7bbaf04017af3050fb3ed1ad1324a
[ "Apache-1.1" ]
1
2020-10-16T12:04:39.000Z
2020-10-16T12:04:39.000Z
# Copyright 2017 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nuage_tempest_plugin.lib.utils import constants


class BaremetalTopology(object):
    """Holds the Neutron/VSD objects for one baremetal test topology.

    Wraps a VSD client plus the created network/subnet/router/port and lazily
    resolves the corresponding VSD-side resources on first access, caching
    each result in a private attribute.

    NOTE(review): the caching idiom ``getattr(self, '_x', False)`` re-fetches
    whenever a cached value is falsy; the cached values here are VSD resource
    dicts/strings, which are presumably always truthy — verify if that changes.
    """

    def __init__(self, vsd_client, network, subnet, router, port,
                 security_group):
        self.vsd_client = vsd_client
        self.network = network
        self.subnet = subnet
        self.router = router
        self.normal_port = port
        # Set by the test after the baremetal port is created.
        self.baremetal_port = None
        self.security_group = security_group

    @property
    def vsd_vport_parent(self):
        # VSD parent of the vport: looked up by the subnet's Neutron id
        # stored as externalID on the VSD side.
        if not getattr(self, '_vsd_vport_parent', False):
            self._vsd_vport_parent = self.vsd_client.get_global_resource(
                self.vsd_vport_parent_resource,
                filters='externalID',
                filter_value=self.subnet['id'])[0]
        return self._vsd_vport_parent

    @property
    def vsd_vport_parent_resource(self):
        # Routed topologies map to an L3 SUBNETWORK, unrouted ones to an
        # L2_DOMAIN.
        if not getattr(self, '_vsd_vport_parent_resource', False):
            if self.router:
                self._vsd_vport_parent_resource = constants.SUBNETWORK
            else:
                self._vsd_vport_parent_resource = constants.L2_DOMAIN
        return self._vsd_vport_parent_resource

    @property
    def vsd_baremetal_vport(self):
        # The VSD vport matching the baremetal port; requires
        # self.baremetal_port to have been assigned by the test first.
        if not getattr(self, '_vsd_baremetal_vport', False):
            vsd_vports = self.vsd_client.get_vport(
                self.vsd_vport_parent_resource,
                self.vsd_vport_parent['ID'],
                filters='externalID',
                filter_value=self.baremetal_port['id'])
            self._vsd_baremetal_vport = vsd_vports[0]
        return self._vsd_baremetal_vport

    @property
    def vsd_domain(self):
        # L3: walk up vport parent -> zone -> domain.
        # L2: the vport parent is itself the L2 domain.
        if not getattr(self, '_vsd_domain', False):
            if self.router:
                zone = self.vsd_client.get_global_resource(
                    constants.ZONE + '/' +
                    self.vsd_vport_parent['parentID'])[0]
                self._vsd_domain = self.vsd_client.get_global_resource(
                    constants.DOMAIN + '/' + zone['parentID'])[0]
            else:
                self._vsd_domain = self.vsd_vport_parent
        return self._vsd_domain

    @property
    def vsd_domain_resource(self):
        if not getattr(self, '_vsd_domain_resource', False):
            if self.router:
                self._vsd_domain_resource = constants.DOMAIN
            else:
                self._vsd_domain_resource = constants.L2_DOMAIN
        return self._vsd_domain_resource

    @property
    def vsd_policygroups(self):
        return self.get_vsd_policygroups()

    def get_vsd_policygroups(self, force_read=False):
        # force_read bypasses the cache so tests can observe newly created
        # policy groups.
        if force_read or not getattr(self, '_vsd_policygroups', False):
            self._vsd_policygroups = self.vsd_client.get_policygroup(
                self.vsd_domain_resource,
                self.vsd_domain['ID'])
        return self._vsd_policygroups

    @property
    def vsd_baremetal_interface_resource(self):
        # HOST vports carry a host interface, everything else a bridge
        # interface.
        if not getattr(self, '_vsd_baremetal_interface_resource', False):
            if self.vsd_baremetal_vport['type'] == constants.VPORT_TYPE_HOST:
                self._vsd_baremetal_interface_resource = constants.HOST_IFACE
            else:
                self._vsd_baremetal_interface_resource = constants.BRIDGE_IFACE
        return self._vsd_baremetal_interface_resource

    @property
    def vsd_baremetal_interface(self):
        if not getattr(self, '_vsd_baremetal_interface', False):
            self._vsd_baremetal_interface = self.vsd_client.get_child_resource(
                constants.VPORT,
                self.vsd_baremetal_vport['ID'],
                self.vsd_baremetal_interface_resource)[0]
        return self._vsd_baremetal_interface

    @property
    def vsd_egress_acl_template(self):
        if not getattr(self, '_vsd_egress_acl_templates', False):
            self._vsd_egress_acl_templates = \
                self.vsd_client.get_egressacl_template(
                    self.vsd_domain_resource,
                    self.vsd_domain['ID'])[0]
        return self._vsd_egress_acl_templates

    @property
    def vsd_egress_acl_entries(self):
        if not getattr(self, '_vsd_egress_acl_entries', False):
            self._vsd_egress_acl_entries = \
                self.vsd_client.get_egressacl_entytemplate(
                    constants.EGRESS_ACL_TEMPLATE,
                    self.vsd_egress_acl_template['ID'])
        return self._vsd_egress_acl_entries

    @property
    def vsd_baremetal_dhcp_opts(self):
        if not getattr(self, '_vsd_baremetal_dhcp_opts', False):
            self._vsd_baremetal_dhcp_opts = self.vsd_client.get_dhcpoption(
                self.vsd_baremetal_interface_resource,
                self.vsd_baremetal_interface['ID'])
        return self._vsd_baremetal_dhcp_opts
39.88806
79
0.649392
ace7eec258a88cf696537ea345b8db3eea91373a
123
py
Python
app/api/helloworld.py
chrislaskey/kingdom
be82551824adadfc0c70e08b188eb45adae974c1
[ "MIT" ]
6
2015-02-17T23:40:42.000Z
2021-11-04T17:22:57.000Z
app/api/helloworld.py
chrislaskey/tree-tracker
23597af0fe3c58cd57622cb01b303ed7743dc0e1
[ "MIT" ]
null
null
null
app/api/helloworld.py
chrislaskey/tree-tracker
23597af0fe3c58cd57622cb01b303ed7743dc0e1
[ "MIT" ]
1
2020-04-20T05:50:02.000Z
2020-04-20T05:50:02.000Z
# The flask.ext.* namespace was deprecated and removed in Flask 1.0;
# extensions are imported under their own package name instead.
from flask_restful import Resource


class HelloWorld(Resource):
    """Minimal REST resource returning a static greeting."""

    def get(self):
        """Handle GET requests with a constant JSON payload."""
        return {'hello': 'world'}
15.375
38
0.666667
ace7eedc9527c1ab92c183fcd789e535b624e25c
7,523
py
Python
dedal/multi_task.py
xxdreck/google-research
dac724bc2b9362d65c26747a8754504fe4c615f8
[ "Apache-2.0" ]
2
2022-01-21T18:15:34.000Z
2022-01-25T15:21:34.000Z
dedal/multi_task.py
xxdreck/google-research
dac724bc2b9362d65c26747a8754504fe4c615f8
[ "Apache-2.0" ]
110
2021-10-01T18:22:38.000Z
2021-12-27T22:08:31.000Z
dedal/multi_task.py
admariner/google-research
7cee4b22b925581d912e8d993625c180da2a5a4f
[ "Apache-2.0" ]
1
2021-06-28T23:13:58.000Z
2021-06-28T23:13:58.000Z
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generic Multi task architecture."""

import copy
from typing import Any, Dict, List, Mapping, Sequence, Tuple, TypeVar, Generic

import gin

T = TypeVar('T')


class NamedLists(dict, Generic[T]):
  """A generic architecture for multi tasks with potentially several levels.

  A dict mapping level names to lists of values; level lists are exposed both
  as dict items and as attributes (``self.key``).
  """

  def __init__(self, layers):
    # Copy each level into a fresh list so callers can't alias our storage.
    layers = {k: list(v) for k, v in layers.items()}
    super().__init__(layers)

  def __getattr__(self, attr):
    # Attribute access falls back to dict lookup (e.g. self.embeddings).
    return self[attr]

  @property
  def levels(self):
    """Returns the level lists, in insertion order."""
    return list(self.values())

  @property
  def size(self):
    """Returns the total number of elements across all levels."""
    return sum(len(x) for x in self.values())

  def constant_copy(self, value):
    """Returns a copy of the structure with only the same value everywhere."""
    return NamedLists(
        layers={k: [value for _ in v] for k, v in self.items()})

  def copy(self):
    """Returns a copy of the NamedLists."""
    return NamedLists(copy.deepcopy(super().copy()))

  def pack(self, values, default_value=None):
    """Packs the values in a NamedLists with the same structure as self."""
    result = self.constant_copy(default_value)
    it = result.__iter__()
    # Walk the fresh structure in iteration order, overwriting slot by slot
    # via the iterator's internal cursor.
    for val in values:
      next(it)
      it._level[it._idx] = val  # pylint: disable=protected-access
    return result

  def flatten(self, empty_value=None):
    """Flattens to a dict keyed 'level/index'; empty levels become 'level/'."""
    result = {}
    for name, values in self.items():
      for i, value in enumerate(values):
        result[f'{name}/{i}'] = value
      if not values:  # special case for empty list to keep the structure.
        result[name + '/'] = empty_value
    return result

  @staticmethod
  def unflatten(values):
    """Unflatten a dict of values that have been previously flattened."""
    result = dict()
    for name, value in values.items():
      idx = name.rfind('/')
      key = name[:idx]
      if key not in result:
        result[key] = []
      # A trailing '/' marks an empty-level placeholder: keep the key but
      # append nothing.
      if idx != len(name) - 1:
        result[key].append(value)
    return NamedLists(result)

  class _Iterator:
    """Iterator on NamedLists: yields elements level by level."""

    def __init__(self, container):
      self._level_iter = iter(container.values())
      self._level = None
      self._idx = -1

    def __next__(self):
      self._idx += 1
      if self._level is None or self._idx >= len(self._level):
        self._level = next(self._level_iter)  # Might raise StopIteration here.
        self._idx = -1
        return self.__next__()
      return self._level[self._idx]

  def __iter__(self):
    return NamedLists._Iterator(self)

  @property
  def shape(self):
    """Returns a tuple with the length of each level."""
    return tuple(len(level) for level in self.levels)


@gin.configurable
class Backbone(NamedLists, Generic[T]):
  """A specific case of NamedList that is used in sequence alignments."""

  def __init__(self, embeddings = (), alignments = ()):
    super().__init__(layers=dict(embeddings=embeddings, alignments=alignments))

  @classmethod
  def constant_from_shape(cls, value, shape):
    """Builds a Backbone filled with `value`, sized by `shape` = (n_emb, n_align)."""
    return cls(
        embeddings=[value for _ in range(shape[0])],
        alignments=[value for _ in range(shape[1])])


@gin.configurable
class SwitchNamedLists(NamedLists[int]):
  """Provides methods to merge N compatible `NamedLists`.

  A `SwitchNamedLists` instance is a `NamedLists[int]` with values in [0, N)
  whose structure matches that of the desired merged `NamedLists` and elements
  indicate from which of the N input `NamedLists` the corresponding output
  value should be taken. That is,
    `output.key[l] = inputs[self.key[l]].key[l]`,
  where `inputs` is a sequence of N `NamedLists`.

  The N input `NamedLists` are assumed to be compatible in the sense that they
  have the same keys and the total number of elements they contain equals the
  number of elements in the `SwitchSeqAlign` instance. That is,
    `self.size == sum(inputs_i.size for inputs_i in inputs)`
  must hold true.
  """

  @property
  def n(self):
    """Returns the number of `NamedLists` being "switched over"."""
    return max(max(l) for l in self.values()) + 1  # Assumes elems in [0, n).

  def filter(self, inputs, i):
    """Removes elements from `NamedLists` not belonging to i-th input.

    Primarily used to remove "dummy" values e.g. from model output.

    Args:
      inputs: a `NamedLists` with structure identical to `self`.
      i: an int between 0 and N-1, both inclusive, where N is the number of
        `NamedLists` to be merged.

    Returns:
      A `NamedLists` defined as
        `output.key = [v for v, j in zip(inputs.key, self.key) if j == i]`.
      That is, for each key, only those elements in the list for which `self`
      takes value `i` at the matching position will be kept.
    """
    flags = self.get_selector(i)
    layers = {}
    for k in self.keys():
      layers[k] = [v for v, flag in zip(inputs[k], flags[k]) if flag]
    return NamedLists(layers)

  def merge(self, inputs):
    """Merges a sequence of N compatible `NamedLists`.

    Args:
      inputs: a sequence of N `NamedLists` with the same keys as `self`
        satisfying `self.size == sum(inputs_i.size for inputs_i in inputs)`.

    Returns:
      a `NamedLists` instance such that
        `output.key[l] = inputs[self.key[l]].key[l]`
      for each key in `self`.
    """
    inputs = [list(inputs_i) for inputs_i in inputs]
    # One read cursor per input; elements are consumed in iteration order.
    offsets = len(inputs) * [0]
    outputs = []
    for i in list(self):  # Needed to appease AutoGraph?
      outputs.append(inputs[i][offsets[i]])
      offsets[i] += 1
    return self.pack(outputs)

  def merge_flattened(
      self, inputs):
    """Merges a sequence of N compatible, flattened `NamedLists`.

    Args:
      inputs: a sequence of N `Mapping[str, T]` corresponding to N
        `NamedLists` that have been flattened. These must have the same keys
        as `self` and satisfy
        `self.size == sum(unflatten(inputs_i).size for inputs_i in inputs)`.

    Returns:
      a `NamedLists` instance such that
        `output.key[l] = inputs[self.key[l]].key[l]`
      for each key in `self`, flattened to `Mapping[str, T]`.
    """
    return self.merge([Backbone.unflatten(m_i) for m_i in inputs]).flatten()

  def get_selector(self, i):
    """Returns `NamedLists` of bools flagging elements from i-th input.

    Args:
      i: an int between 0 and N - 1, both inclusive, where N is the number of
        `NamedLists` to be merged.

    Returns:
      a `NamedLists[bool]` such that `output.key[l] = self.key[l] == i`.
    """
    return self.pack([j == i for j in self])


@gin.configurable
class SwitchBackbone(SwitchNamedLists):
  """A specific case of SwitchNamedLists that is used in sequence alignments."""

  def __init__(self, embeddings = (), alignments = ()):
    super().__init__(layers=dict(embeddings=embeddings, alignments=alignments))

  @classmethod
  def constant_like(cls, container, value = 0):
    """Builds a SwitchBackbone with `container`'s shape, filled with `value`."""
    return cls(
        embeddings=[value for _ in container.embeddings],
        alignments=[value for _ in container.alignments])
32.149573
80
0.659444
ace7efcc3ffd582a7a73361e60dd06bf5ef52ea3
1,220
py
Python
examples/basics/plotting/colorbar.py
ghisvail/vispy
39d4a81db6d84f813bd23e76ff3d61bd4e6bf46f
[ "BSD-3-Clause" ]
2
2020-11-27T10:51:56.000Z
2020-12-28T20:39:14.000Z
examples/basics/plotting/colorbar.py
ghisvail/vispy
39d4a81db6d84f813bd23e76ff3d61bd4e6bf46f
[ "BSD-3-Clause" ]
2
2015-11-04T19:43:29.000Z
2015-11-19T04:26:29.000Z
examples/basics/plotting/colorbar.py
ghisvail/vispy
39d4a81db6d84f813bd23e76ff3d61bd4e6bf46f
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# vispy: gallery 1

"""
Plot different styles of ColorBar using vispy.plot
"""

from vispy import plot as vp
import numpy as np


# arg( e^(1/z) )
def exp_z_inv(x, y):
    """Return the argument (in degrees) of e^(1/z) for z = x + i*y."""
    # np.complex / np.float were deprecated aliases removed in NumPy 1.20;
    # the builtins are the correct replacements.
    z = complex(x, y)
    f = np.exp(1.0 / z)
    return np.angle(f, deg=True)


# create a 2d grid whose elements are of exp_z_inv
def gen_image(width, height):
    """Sample exp_z_inv on a (height, width) grid over [-0.5, 0.5]^2."""
    x_vals = np.linspace(-0.5, 0.5, width)
    y_vals = np.linspace(-0.5, 0.5, height)
    grid = np.meshgrid(x_vals, y_vals)
    v_fn = np.vectorize(exp_z_inv)
    return v_fn(*grid).astype(float)


fig = vp.Fig(size=(800, 600), show=False)
plot = fig[0, 0]
plot.bgcolor = "#efefef"

img = gen_image(500, 500)
plot.image(img, cmap="hsl")
plot.camera.set_range((100, 400), (100, 400))

positions = ["top", "bottom", "left", "right"]
for position in positions:
    plot.colorbar(position=position,
                  label="argument of e^(1/z)",
                  clim=("0°", "180°"),
                  cmap="hsl",
                  border_width=1,
                  border_color="#aeaeae")

if __name__ == '__main__':
    fig.show(run=True)
24.897959
73
0.607377
ace7f0e27503d675e74527531563643349db91a3
7,802
py
Python
djangoproj/djangoapp/csc/nl/ja/cabocha_token.py
pbarton666/buzz_bot
9f44c66e8ecb10e231f70989421f164d7a55029a
[ "MIT" ]
null
null
null
djangoproj/djangoapp/csc/nl/ja/cabocha_token.py
pbarton666/buzz_bot
9f44c66e8ecb10e231f70989421f164d7a55029a
[ "MIT" ]
null
null
null
djangoproj/djangoapp/csc/nl/ja/cabocha_token.py
pbarton666/buzz_bot
9f44c66e8ecb10e231f70989421f164d7a55029a
[ "MIT" ]
null
null
null
#python-encoding: utf-8

from csc.nl.ja.debug import *
from csc.nl.ja.util import *
from csc.nl.ja.tree import *
import MeCab
import CaboCha
import re


class JaToken(JaTreeLeaf, JaLanguageNode):
    '''
    Represents a single token inside a word/chunk/utterance.

    This should only be called from JaUtterance, as it assumes that the text
    given has been cleaned properly. Right now, this class is essentially a
    tree parser for CaboCha.
    '''

    def __init__(self, cabocha_token):
        JaTreeLeaf.__init__(self)

        pos = [None, None, None, None]

        # CaboCha emits either 9 features (full conjugation info) or 7
        # (no conjugated-form / pronunciation fields).
        if cabocha_token.feature_list_size == 9:
            ( pos[0],            # part of speech (品詞)
              pos[1],            # PoS subdivision 1 (品詞細分類1)
              pos[2],            # PoS subdivision 2 (品詞細分類2)
              pos[3],            # PoS subdivision 3 (品詞細分類3)
              self.conj_form,    # conjugated form (活用形)
              self.infl_type,    # inflection type (活用型)
              self.base_form,    # base/dictionary form (原形)
              self.reading,      # reading (読み)
              self.prounciation  # pronunciation (発音)
            ) = [ cabocha_token.feature_list(i) for i in range(0, cabocha_token.feature_list_size) ]
        elif cabocha_token.feature_list_size == 7:
            ( pos[0],            # part of speech (品詞)
              pos[1],            # PoS subdivision 1 (品詞細分類1)
              pos[2],            # PoS subdivision 2 (品詞細分類2)
              pos[3],            # PoS subdivision 3 (品詞細分類3)
              self.base_form,    # base/dictionary form (原形)
              self.infl_type,    # inflection type (活用型)
              self.reading       # reading (読み)
            ) = [ cabocha_token.feature_list(i) for i in range(0, cabocha_token.feature_list_size) ]
            self.conj_form     = '*'
            self.prounciation  = '*'

        self.surface = clean_input(cabocha_token.normalized_surface) # Do NOT use .surface or CaboCha WILL crash #

        # Cleanup input #
        self.pos        = pos[0]
        self.pos_string = ':'.join(filter(lambda x: x != '*', pos))

        # CaboCha uses '*' as "no value"; normalize to None.
        if self.base_form     == '*': self.base_form     = None
        if self.conj_form     == '*': self.conj_form     = None
        if self.infl_type     == '*': self.infl_type     = None
        if self.prounciation  == '*': self.prounciation  = None
        if self.reading       == '*': self.reading       = None

    def __str__(self):
        return self.surface

    def query_pos(self, string):
        ''' Internally used to search for terms in the original PoS string '''
        string = ja_enc(string)
        return len(filter(None, [string == x for x in re.split(':', self.pos_string)])) > 0

    dump_lines = JaDebug.dump_lines_token

    @shared_property
    def is_verb(self):
        '''
        True if this is a verb, or any part of a verb
        Note: Will report true for any verb, aux. verb, or certain types of
        verb conjunctions. Additionally check is_independant() and
        is_auxilliary_verb() for verb stems
        '''
        return self.query_pos('動詞') or self.is_auxilliary_verb

    @shared_property
    def is_noun(self):
        '''
        True if this is any kind of noun or noun affix
        Note: na adjectives will NOT return true for this test despite being
        classified as nouns by CaboCha
        '''
        return self.query_pos('名詞') and not self.is_na_adjective

    @shared_property
    def is_adverb(self):
        ''' True if this is any kind of adverb or adverb affix '''
        return self.query_pos('副詞')

    @shared_property
    def is_nai_adjective_stem(self):
        ''' True if this is a nai adjective stem '''
        return self.is_noun and self.query_pos('ナイ形容詞語幹')

    @shared_property
    def is_auxilliary_verb(self):
        ''' True if this is an auxiliary verb '''
        return self.query_pos('助動詞')

    @shared_property
    def is_na_adjective(self):
        '''
        True If this is a na adjective
        Note: All na adjectives are nouns, and the 'na' affix is not guaranteed
        '''
        return self.query_pos('形容動詞語幹')

    @shared_property
    def is_i_adjective(self):
        ''' True if this is an i adjective '''
        return self.query_pos('形容詞')

    @shared_property
    def is_adjective(self):
        ''' True if this is either na or i adjective or is a determinative '''
        return self.is_i_adjective or self.is_na_adjective or self.is_determinative

    @shared_property
    def is_irregular(self):
        ''' True if this is a verb root or suffix with an irregular conjugation '''
        return re.search('特殊', self.conj_form or '') != None

    @shared_property
    def is_determinative(self):
        ''' True if this word is a determinative (kono, sono, etc.) '''
        return self.query_pos('連体詞')

    @shared_property
    def is_particle(self):
        '''
        True if this is a particle. That is: ha (wa), ga, he (e), wo (o), ni, etc.
        Without context, sometimes these are misclassified
        (example: ha (wa) -> ha (leaf))
        '''
        return self.query_pos('助詞')

    @shared_property
    def is_number(self):
        ''' True if this is a number '''
        return self.query_pos('数')

    @shared_property
    def is_counter(self):
        ''' True if this is an object counter '''
        return self.query_pos('助数詞')

    @shared_property
    def is_unknown(self):
        ''' True if this word was not classified by MeCab '''
        return self.query_pos('未知語')

    @shared_property
    def is_punctuation(self):
        ''' True if this word is punctuation '''
        return self.query_pos('記号')

    @shared_property
    def is_stopword(self):
        ''' True if this word is a stopword '''
        # Particle #
        if self.is_particle:    return True
        if self.is_punctuation: return True
        if self.is_adverb:      return True
        return False

    @shared_property
    def is_suffix(self):
        ''' True if this is a suffix '''
        return self.query_pos('接尾')

    @shared_property
    def is_independant(self):
        '''
        True if this is independant. This is NOT the same thing as
        not self.is_dependant() (usually occurs in conjunctive clauses)
        '''
        return self.query_pos('自立')

    @shared_property
    def is_dependant(self):
        '''
        True if this is dependant.
        This is NOT the same thing as not self.is_independant()
        '''
        return self.query_pos('非自立')

    @shared_property
    def is_imperitive(self):
        ''' True if this is in imperitive (-na[kere]) form (most likely a verb root) '''
        return self.query_pos('未然形')

    @shared_property
    def is_hypothetical(self):
        ''' True if this is in hypothetical (-kere) form (most likely a verb conjugate) '''
        return self.query_pos('仮定形')

    @shared_property
    def is_conjunctive(self):
        ''' True if this is in conjuctive (-te) form (most likely a verb root) '''
        return self.is_suru_conjunctive or self.is_te_conjunctive or self.infl_type == ja_enc('連用形')

    @shared_property
    def is_te_conjunctive(self):
        ''' True if this is te-conjuctive (adjective-suru or adjective-naru mostly) '''
        return re.match(ja_enc('連用テ接続'), self.infl_type or '') != None

    @shared_property
    def is_suru_conjunctive(self):
        ''' True if this is in conjuctive noun-suru form '''
        return self.query_pos('サ変接続')

    @shared_property
    def is_base_inflection(self):
        ''' True if this is in in base inflective form. '''
        return self.infl_type == ja_enc('基本形')

    @shared_property
    def is_token(self):
        ''' Always True '''
        return True

    @property
    def stem(self):
        '''
        The stem of the word we're in (here to allow elegant recursion -
        real work is done in words)
        '''
        return self.base_form or self.surface

    lemma_form = stem
    inflection = stem

    @shared_property
    def is_negative(self):
        return False
33.34188
114
0.593438
ace7f19d3e0db4be1d426d60b83408e8b959fca7
1,056
py
Python
problem_loader.py
robbiemu/Greedy-Algorithms-Minimum-Spanning-Trees-and-Dynamic-Programming
a43004642c0b7f7270ec3f7a141dd2b48db34947
[ "MIT" ]
null
null
null
problem_loader.py
robbiemu/Greedy-Algorithms-Minimum-Spanning-Trees-and-Dynamic-Programming
a43004642c0b7f7270ec3f7a141dd2b48db34947
[ "MIT" ]
null
null
null
problem_loader.py
robbiemu/Greedy-Algorithms-Minimum-Spanning-Trees-and-Dynamic-Programming
a43004642c0b7f7270ec3f7a141dd2b48db34947
[ "MIT" ]
null
null
null
import requests
import pickle
import logging


def preprocess_data(data):
    """Parse whitespace-separated integers from raw response content."""
    return list(map(int, data.split()))


class ProblemLoader():
    """Downloads a problem dataset, preprocesses it, and caches it with pickle.

    fetch() returns the cached values when a valid pickle exists; otherwise
    (or when clear=True) it downloads from `url`, preprocesses, caches, and
    returns the fresh values.
    """

    def __init__(self, url, fname="100kint.p", preprocessor=preprocess_data):
        self.url = url
        self.preprocess_data = preprocessor
        self.fname = fname

    def _download_and_cache(self):
        """Fetch from self.url, preprocess, pickle to self.fname, return values."""
        logging.info("Preprocess new dataset")
        r = requests.get(self.url, allow_redirects=True)
        values = self.preprocess_data(r.content)
        # Context manager so the file handle is always closed (the original
        # passed an anonymous open() to pickle.dump and leaked it).
        with open(self.fname, "wb") as f:
            pickle.dump(values, f)
        return values

    def fetch(self, clear=False):
        """Return the dataset values, re-downloading when clear=True.

        Args:
            clear: when True, ignore any existing cache and re-download.

        Returns:
            The preprocessed values (a list of ints with the default
            preprocessor).
        """
        if clear:
            logging.debug("Ignoring any existing dataset!")
            return self._download_and_cache()
        try:
            logging.info("Loading existing dataset")
            with open(self.fname, "rb") as f:
                return pickle.load(f)
        # Narrowed from a bare `except:`: only a missing/unreadable or
        # corrupt cache should trigger a re-download, not e.g.
        # KeyboardInterrupt.
        except (OSError, pickle.UnpicklingError, EOFError):
            logging.debug("Failed to load existing dataset!")
            return self._download_and_cache()
30.171429
75
0.655303
ace7f253a9adb5de6047d3323805b6b41d6c24da
736
py
Python
First_course/ex6_5.py
laetrid/learning
b28312c34db2118fb7d5691834b8f7e628117642
[ "Apache-2.0" ]
null
null
null
First_course/ex6_5.py
laetrid/learning
b28312c34db2118fb7d5691834b8f7e628117642
[ "Apache-2.0" ]
null
null
null
First_course/ex6_5.py
laetrid/learning
b28312c34db2118fb7d5691834b8f7e628117642
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python ''' 5. Write a program that prompts a user for an IP address, then checks if the IP address is valid, and then converts the IP address to binary (dotted decimal format). Re-use the functions created in exercises 3 and 4 ('import' the functions into your new program). ''' # Import functiosn - ip check, ip to bin and bin padding from ex6_3 import ip_checker from ex6_4 import ip2bin, bin_padding ip_valid = False while not ip_valid: ip_addr = raw_input('Please enter an IP address: ') ip_valid = ip_checker(ip_addr) ip_addr_bin = ip2bin(ip_addr) bin_padding(ip_addr_bin) print "" print "The IP address: %s is valid." % ip_addr print "In binary it is: %s." % '.'.join(ip_addr_bin) print "" # The END
28.307692
263
0.730978
ace7f2c04b677603dae640d14c68595717cdf97e
14,930
py
Python
tensorflow_datasets/image/open_images.py
thanhkaist/datasets
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
[ "Apache-2.0" ]
2
2019-10-20T05:40:10.000Z
2019-10-31T17:25:52.000Z
tensorflow_datasets/image/open_images.py
thanhkaist/datasets
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/image/open_images.py
thanhkaist/datasets
02da35c558ec8ea704e744a2008c5cecb2e7a0a1
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2019 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Open images datasets. https://storage.googleapis.com/openimages/web/index.html """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import functools import io import os from absl import logging import numpy as np import tensorflow as tf import tensorflow_datasets.public_api as tfds _DESCRIPTION = '''\ Open Images is a dataset of ~9M images that have been annotated with image-level labels and object bounding boxes. The training set of V4 contains 14.6M bounding boxes for 600 object classes on 1.74M images, making it the largest existing dataset with object location annotations. The boxes have been largely manually drawn by professional annotators to ensure accuracy and consistency. The images are very diverse and often contain complex scenes with several objects (8.4 per image on average). Moreover, the dataset is annotated with image-level labels spanning thousands of classes. 
''' _CITATION = '''\ @article{OpenImages, author = {Alina Kuznetsova and Hassan Rom and Neil Alldrin and Jasper Uijlings and Ivan Krasin and Jordi Pont-Tuset and Shahab Kamali and Stefan Popov and Matteo Malloci and Tom Duerig and Vittorio Ferrari}, title = {The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale}, year = {2018}, journal = {arXiv:1811.00982} } @article{OpenImages2, author = {Krasin, Ivan and Duerig, Tom and Alldrin, Neil and Ferrari, Vittorio and Abu-El-Haija, Sami and Kuznetsova, Alina and Rom, Hassan and Uijlings, Jasper and Popov, Stefan and Kamali, Shahab and Malloci, Matteo and Pont-Tuset, Jordi and Veit, Andreas and Belongie, Serge and Gomes, Victor and Gupta, Abhinav and Sun, Chen and Chechik, Gal and Cai, David and Feng, Zheyun and Narayanan, Dhyanesh and Murphy, Kevin}, title = {OpenImages: A public dataset for large-scale multi-label and multi-class image classification.}, journal = {Dataset available from https://storage.googleapis.com/openimages/web/index.html}, year={2017} } ''' # Reading from .tar.gz is slower than extracting the gz and then reading from # tar. We still read from the tar because it's faster to read fewer files on # many network based FS. 
# pylint: disable=line-too-long _URLS = { 'train_images': [tfds.download.Resource( # pylint:disable=g-complex-comprehension url='http://open-images-dataset.s3.amazonaws.com/tar/train_%s.tar.gz' % i_, extract_method=tfds.download.ExtractMethod.GZIP) for i_ in '0123456789abcdef'], 'test_images': tfds.download.Resource( url='http://open-images-dataset.s3.amazonaws.com/tar/test.tar.gz', extract_method=tfds.download.ExtractMethod.GZIP), 'validation_images': tfds.download.Resource( url='http://open-images-dataset.s3.amazonaws.com/tar/validation.tar.gz', extract_method=tfds.download.ExtractMethod.GZIP), 'train_human_labels': 'https://storage.googleapis.com/openimages/2018_04/train/train-annotations-human-imagelabels.csv', 'train_machine_labels': 'https://storage.googleapis.com/openimages/2018_04/train/train-annotations-machine-imagelabels.csv', 'test_human_labels': 'https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels.csv', 'test_machine_labels': 'https://storage.googleapis.com/openimages/2018_04/test/test-annotations-machine-imagelabels.csv', 'validation_human_labels': 'https://storage.googleapis.com/openimages/2018_04/validation/validation-annotations-human-imagelabels.csv', 'validation_machine_labels': 'https://storage.googleapis.com/openimages/2018_04/validation/validation-annotations-machine-imagelabels.csv', 'train-annotations-bbox': 'https://storage.googleapis.com/openimages/2018_04/train/train-annotations-bbox.csv', 'test-annotations-bbox': 'https://storage.googleapis.com/openimages/2018_04/test/test-annotations-bbox.csv', 'validation-annotations-bbox': 'https://storage.googleapis.com/openimages/2018_04/validation/validation-annotations-bbox.csv', } # pylint: enable=line-too-long _Object = collections.namedtuple('Object', ['label', 'confidence', 'source']) _Bbox = collections.namedtuple('Bbox', [ 'label', 'source', 'bbox', 'is_occluded', 'is_truncated', 'is_group_of', 'is_depiction', 'is_inside']) IMAGE_LEVEL_SOURCES = [ 
'verification', 'crowdsource-verification', # human labels 'machine', ] BBOX_SOURCES = [ 'freeform', 'xclick', # Manually drawn boxes. 'activemil', # Machine generated, human controlled. ] class OpenImagesV4Config(tfds.core.BuilderConfig): """BuilderConfig for OpenImagesV4.""" def __init__(self, target_pixels=None, **kwargs): """BuilderConfig for OpenImagesV4. Args: target_pixels: If given, rescale the images so that the number of pixels is roughly this value. **kwargs: keyword arguments forward to super. """ super(OpenImagesV4Config, self).__init__(**kwargs) self._target_pixels = target_pixels @property def target_pixels(self): return self._target_pixels class OpenImagesV4(tfds.core.GeneratorBasedBuilder): """Open Images v4.""" BUILDER_CONFIGS = [ OpenImagesV4Config( name='original', version='0.2.0', description='Images at their original resolution and quality.'), OpenImagesV4Config( name='300k', version='0.2.1', description='Images have roughly 300,000 pixels, at 72 JPEG quality.', target_pixels=300000), OpenImagesV4Config( name='200k', version='0.2.1', description='Images have roughly 200,000 pixels, at 72 JPEG quality.', target_pixels=200000) ] def _info(self): source_class_label = tfds.features.ClassLabel( names=IMAGE_LEVEL_SOURCES + BBOX_SOURCES) all_class_label = tfds.features.ClassLabel( names_file=tfds.core.get_tfds_path( os.path.join('image', 'open_images_classes_all.txt'))) trainable_class_label = tfds.features.ClassLabel( names_file=tfds.core.get_tfds_path( os.path.join('image', 'open_images_classes_trainable.txt'))) boxable_class_label = tfds.features.ClassLabel( names_file=tfds.core.get_tfds_path( os.path.join('image', 'open_images_classes_boxable.txt'))) return tfds.core.DatasetInfo( builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({ 'image': tfds.features.Image(), 'image/filename': tfds.features.Text(), # eg '226f0a1873b9bf8e.jpg' 'objects': tfds.features.Sequence({ 'label': all_class_label, # Original data is 0, .1, ..., 
1. We use 0, 1, 2, ..., 10. 'confidence': tf.int32, 'source': source_class_label, }), 'objects_trainable': tfds.features.Sequence({ 'label': trainable_class_label, # Original data is 0, .1, ..., 1. We use 0, 1, 2, ..., 10. 'confidence': tf.int32, 'source': source_class_label, }), 'bobjects': tfds.features.Sequence({ 'label': boxable_class_label, 'source': source_class_label, 'bbox': tfds.features.BBoxFeature(), # Following values can be: 1 (true), 0 (false) and -1 (unknown). 'is_occluded': tf.int8, 'is_truncated': tf.int8, 'is_group_of': tf.int8, 'is_depiction': tf.int8, 'is_inside': tf.int8, }), }), urls=['https://storage.googleapis.com/openimages/web/index.html'], citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" paths = dl_manager.download_and_extract(_URLS) # Load labels from CSVs: def load(names): csv_positions = [0] * len(names) return functools.partial(_load_objects, [paths[name] for name in names], csv_positions) train_objects = load(['train_human_labels', 'train_machine_labels']) test_objects = load(['test_human_labels', 'test_machine_labels']) validation_objects = load(['validation_human_labels', 'validation_machine_labels']) def load_boxes(name): csv_positions = [0] return functools.partial(_load_bboxes, paths[name], csv_positions) train_bbox = load_boxes('train-annotations-bbox') test_bbox = load_boxes('test-annotations-bbox') validation_bbox = load_boxes('validation-annotations-bbox') return [ tfds.core.SplitGenerator( name=tfds.Split.TRAIN, num_shards=512, gen_kwargs=dict(archive_paths=paths['train_images'], objects_getter=train_objects, bboxes_getter=train_bbox, prefixes='0123456789abcdef'), ), tfds.core.SplitGenerator( name=tfds.Split.TEST, num_shards=36, gen_kwargs=dict(archive_paths=[paths['test_images']], objects_getter=test_objects, bboxes_getter=test_bbox), ), tfds.core.SplitGenerator( name=tfds.Split.VALIDATION, num_shards=12, gen_kwargs=dict(archive_paths=[paths['validation_images']], 
objects_getter=validation_objects, bboxes_getter=validation_bbox), ), ] def _generate_examples(self, archive_paths, objects_getter, bboxes_getter, prefixes=None): """Yields examples.""" trainable_classes = set( self.info.features['objects_trainable']['label'].names) for i, archive_path in enumerate(archive_paths): prefix = prefixes[i] if prefixes else None objects = objects_getter(prefix) bboxes = bboxes_getter(prefix) logging.info('Opening archive %s ...', archive_path) archive = tfds.download.iter_archive( archive_path, tfds.download.ExtractMethod.TAR_STREAM) for fpath, fobj in archive: fname = os.path.basename(fpath) image_id = int(os.path.splitext(fname)[0], 16) image_objects = [obj._asdict() for obj in objects.get(image_id, [])] image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])] image_objects_trainable = [ obj for obj in image_objects if obj['label'] in trainable_classes ] yield { 'image': _resize_image_if_necessary( fobj, target_pixels=self.builder_config.target_pixels), 'image/filename': fname, 'objects': image_objects, 'objects_trainable': image_objects_trainable, 'bobjects': image_bboxes, } def _resize_image_if_necessary(image_fobj, target_pixels=None): """Resize an image to have (roughly) the given number of target pixels. Args: image_fobj: File object containing the original image. target_pixels: If given, number of pixels that the image must have. Returns: A file object. """ if target_pixels is None: return image_fobj cv2 = tfds.core.lazy_imports.cv2 # Decode image using OpenCV2. image = cv2.imdecode( np.fromstring(image_fobj.read(), dtype=np.uint8), flags=3) # Get image height and width. height, width, _ = image.shape actual_pixels = height * width if actual_pixels > target_pixels: factor = np.sqrt(target_pixels / actual_pixels) image = cv2.resize(image, dsize=None, fx=factor, fy=factor) # Encode the image with quality=72 and store it in a BytesIO object. 
_, buff = cv2.imencode('.jpg', image, [int(cv2.IMWRITE_JPEG_QUALITY), 72]) return io.BytesIO(buff.tostring()) def _load_objects(csv_paths, csv_positions, prefix): """Returns objects listed within given CSV files.""" logging.info('Loading CSVs %s from positions %s with prefix %s', csv_paths, csv_positions, prefix) objects = collections.defaultdict(list) for i, labels_path in enumerate(csv_paths): with tf.io.gfile.GFile(labels_path) as csv_f: if csv_positions[i] > 0: csv_f.seek(csv_positions[i]) else: csv_f.readline() # Drop headers reader = csv.reader(csv_f) for image_id, source, label, confidence in reader: if prefix and image_id[0] != prefix: break csv_positions[i] = csv_f.tell() image_id = int(image_id, 16) current_obj = _Object(label, int(float(confidence) * 10), source) objects[image_id].append(current_obj) return dict(objects) def _load_bboxes(csv_path, csv_positions, prefix): """Returns bounded boxes listed within given CSV file.""" logging.info('Loading CSVs %s from positions %s with prefix %s', csv_path, csv_positions, prefix) boxes = collections.defaultdict(list) with tf.io.gfile.GFile(csv_path) as csv_f: if csv_positions[0] > 0: csv_f.seek(csv_positions[0]) else: csv_f.readline() # Drop headers reader = csv.reader(csv_f) for (image_id, source, label, confidence, xmin, xmax, ymin, ymax, is_occluded, is_truncated, is_group_of, is_depiction, is_inside, ) in reader: if prefix and image_id[0] != prefix: break csv_positions[0] = csv_f.tell() image_id = int(image_id, 16) del confidence # always 1 in bounding boxes. current_row = _Bbox( label, source, tfds.features.BBox( float(ymin), float(xmin), float(ymax), float(xmax)), int(is_occluded), int(is_truncated), int(is_group_of), int(is_depiction), int(is_inside)) boxes[image_id].append(current_row) return dict(boxes)
39.813333
143
0.655794
ace7f2f30bbf628decc2dc7f89828619fa63cea6
3,221
py
Python
config/settings.py
cooksta120021/LsS-LmS
c053477a323b220f59c012e1729bdb6796cd1547
[ "MIT" ]
null
null
null
config/settings.py
cooksta120021/LsS-LmS
c053477a323b220f59c012e1729bdb6796cd1547
[ "MIT" ]
2
2022-02-06T16:15:46.000Z
2022-03-05T19:27:32.000Z
config/settings.py
cooksta120021/LsS-LmS
c053477a323b220f59c012e1729bdb6796cd1547
[ "MIT" ]
null
null
null
""" Django settings for config project. Generated by 'django-admin startproject' using Django 4.0.3. For more information on this file, see https://docs.djangoproject.com/en/4.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/4.0/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-ay*abjpgw@z7(^-v-&sp4=oycr)05pxzwj^6v)rir7t7tyf+jz' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/4.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR 
/ 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/4.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/4.0/howto/static-files/ STATIC_URL = 'static/' # Default primary key field type # https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
25.975806
91
0.701645
ace7f3d123565af5813154602bc0d3f7e2fbad6e
746
py
Python
example_app/update_db.py
quanpower/flask-iotdashboard
e676e4edfc6014f16c761544deb6f86f2c6399c3
[ "MIT" ]
null
null
null
example_app/update_db.py
quanpower/flask-iotdashboard
e676e4edfc6014f16c761544deb6f86f2c6399c3
[ "MIT" ]
null
null
null
example_app/update_db.py
quanpower/flask-iotdashboard
e676e4edfc6014f16c761544deb6f86f2c6399c3
[ "MIT" ]
1
2018-12-03T07:05:43.000Z
2018-12-03T07:05:43.000Z
from socket import * import time import binascii import datetime # from bitstring import BitArray import sqlite3 import struct conn = sqlite3.connect('daq.db', timeout=5) print("Opened database successfully") cursor = conn.cursor() select_sql = '''SELECT * FROM channels;''' results = cursor.execute(select_sql) all_channels = results.fetchall() print("Table selected successfully") ai_range_list= [] for channel in all_channels: ai_range_list.append({'channel_name':channel[1], 'pmax':channel[2],'pmin':channel[3],'high_limit':channel[4],'low_limit':channel[5],'unit':channel[6]}) print(ai_range_list) # update_sql = "update channels set high_limit = 50, low_limit=10, unit=c where id =1" # cursor.execute(update_sql) conn.close()
23.3125
155
0.74933
ace7f46327014f63bd8a3f0dcc1fc132dd32ac13
4,059
py
Python
nova/cmd/dhcpbridge.py
bopopescu/nova_audit
1cd2901802f82d39411adfa04cf2f432ff3bf280
[ "Apache-2.0" ]
1
2020-02-21T19:19:11.000Z
2020-02-21T19:19:11.000Z
nova/cmd/dhcpbridge.py
bopopescu/nova_audit
1cd2901802f82d39411adfa04cf2f432ff3bf280
[ "Apache-2.0" ]
null
null
null
nova/cmd/dhcpbridge.py
bopopescu/nova_audit
1cd2901802f82d39411adfa04cf2f432ff3bf280
[ "Apache-2.0" ]
1
2020-07-24T09:15:58.000Z
2020-07-24T09:15:58.000Z
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handle lease database updates from DHCP servers. """ from __future__ import print_function import os import sys from oslo.config import cfg from nova import config from nova import context from nova import db from nova.network import rpcapi as network_rpcapi from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import rpc CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') CONF.import_opt('network_manager', 'nova.service') LOG = logging.getLogger(__name__) def add_lease(mac, ip_address): """Set the IP that was assigned by the DHCP server.""" api = network_rpcapi.NetworkAPI() api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host) def old_lease(mac, ip_address): """Called when an old lease is recognized.""" # NOTE(vish): We assume we heard about this lease the first time. # If not, we will get it the next time the lease is # renewed. 
pass def del_lease(mac, ip_address): """Called when a lease expires.""" api = network_rpcapi.NetworkAPI() api.release_fixed_ip(context.get_admin_context(), ip_address, CONF.host) def init_leases(network_id): """Get the list of hosts for a network.""" ctxt = context.get_admin_context() network_ref = db.network_get(ctxt, network_id) network_manager = importutils.import_object(CONF.network_manager) return network_manager.get_dhcp_leases(ctxt, network_ref) def add_action_parsers(subparsers): parser = subparsers.add_parser('init') # NOTE(cfb): dnsmasq always passes mac, and ip. hostname # is passed if known. We don't care about # hostname, but argparse will complain if we # do not accept it. for action in ['add', 'del', 'old']: parser = subparsers.add_parser(action) parser.add_argument('mac') parser.add_argument('ip') parser.add_argument('hostname', nargs='?', default='') parser.set_defaults(func=globals()[action + '_lease']) CONF.register_cli_opt( cfg.SubCommandOpt('action', title='Action options', help='Available dhcpbridge options', handler=add_action_parsers)) def main(): """Parse environment and arguments and call the appropriate action.""" config.parse_args(sys.argv, default_config_files=jsonutils.loads(os.environ['CONFIG_FILE'])) logging.setup("nova") global LOG LOG = logging.getLogger('nova.dhcpbridge') if CONF.action.name in ['add', 'del', 'old']: msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % {"action": CONF.action.name, "mac": CONF.action.mac, "ip": CONF.action.ip}) LOG.debug(msg) CONF.action.func(CONF.action.mac, CONF.action.ip) else: try: network_id = int(os.environ.get('NETWORK_ID')) except TypeError: LOG.error(_("Environment variable 'NETWORK_ID' must be set.")) return(1) print(init_leases(network_id)) rpc.cleanup()
33
78
0.673811
ace7f4a1230c084fa37ad1863c28143501dd824a
106
py
Python
settings/Dataset_Check_Panel_settings.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
null
null
null
settings/Dataset_Check_Panel_settings.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
1
2019-10-22T21:28:31.000Z
2019-10-22T21:39:12.000Z
settings/Dataset_Check_Panel_settings.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
2
2019-06-06T15:06:46.000Z
2020-07-20T02:03:22.000Z
report.properties = { 'Label': 'dataset.report', 'Enabled': 'True', } report.refresh_period = 5.0
17.666667
30
0.632075
ace7f65cac5257a12f1603c0667149ff4b3394b8
1,111
py
Python
main.py
doggo4242/log
48fceeb7383ba89155bc9ea2b54ba9befad502d8
[ "Apache-2.0" ]
1
2022-01-01T23:04:30.000Z
2022-01-01T23:04:30.000Z
main.py
doggo4242/log
48fceeb7383ba89155bc9ea2b54ba9befad502d8
[ "Apache-2.0" ]
null
null
null
main.py
doggo4242/log
48fceeb7383ba89155bc9ea2b54ba9befad502d8
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 ''' Copyright 2021 doggo4242 Development Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import discord from discord.ext import commands import pymongo import os from log_cogs import * intents = discord.Intents.default() intents.members = True intents.guilds = True bot = commands.Bot(command_prefix='l!',intents=intents) client = pymongo.MongoClient('mongodb://mongodb:27017/') file_db = client['file_db']['links'] bot.add_cog(Util(bot,client,file_db)) bot.add_cog(Management(bot)) bot.add_cog(Listeners(bot)) bot.add_cog(LogCommands(bot)) bot.run(os.getenv('LOG_TOKEN'))
30.027027
75
0.756976
ace7f6806150b5e88bd5f8238c9bf0343cb75f69
8,587
py
Python
experiments/ashvin/corl2019/debug/pusher2/offpolicy_ccrig_debug.py
Asap7772/rail-rl-franka-eval
4bf99072376828193d05b53cf83c7e8f4efbd3ba
[ "MIT" ]
null
null
null
experiments/ashvin/corl2019/debug/pusher2/offpolicy_ccrig_debug.py
Asap7772/rail-rl-franka-eval
4bf99072376828193d05b53cf83c7e8f4efbd3ba
[ "MIT" ]
null
null
null
experiments/ashvin/corl2019/debug/pusher2/offpolicy_ccrig_debug.py
Asap7772/rail-rl-franka-eval
4bf99072376828193d05b53cf83c7e8f4efbd3ba
[ "MIT" ]
null
null
null
import railrl.misc.hyperparameter as hyp from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in, sawyer_pusher_camera_upright_v2 from railrl.launchers.launcher_util import run_experiment from railrl.torch.grill.launcher import * import railrl.torch.vae.vae_schedules as vae_schedules from railrl.torch.vae.conv_vae import imsize48_default_architecture, imsize48_default_architecture_with_more_hidden_layers from railrl.launchers.arglauncher import run_variants from railrl.torch.grill.cvae_experiments import ( grill_her_td3_offpolicy_online_vae_full_experiment, ) from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DWallEnv from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv from railrl.torch.vae.conditional_conv_vae import DeltaCVAE from railrl.torch.vae.vae_trainer import DeltaCVAETrainer from railrl.data_management.online_conditional_vae_replay_buffer import \ OnlineConditionalVaeRelabelingBuffer x_var = 0.2 x_low = -x_var x_high = x_var y_low = 0.5 y_high = 0.7 t = 0 if __name__ == "__main__": variant = dict( double_algo=False, online_vae_exploration=False, imsize=48, init_camera=sawyer_init_camera_zoomed_in, env_class=SawyerMultiobjectEnv, env_kwargs=dict( num_objects=1, object_meshes=None, fixed_start=True, num_scene_objects=[1], maxlen=0.1, action_repeat=1, puck_goal_low=(x_low + 0.01, y_low + 0.01), puck_goal_high=(x_high - 0.01, y_high - 0.01), hand_goal_low=(x_low + 3*t, y_low + t), hand_goal_high=(x_high - 3*t, y_high -t), mocap_low=(x_low + 2*t, y_low , 0.0), mocap_high=(x_high - 2*t, y_high, 0.5), object_low=(x_low + 0.01, y_low + 0.01, 0.02), object_high=(x_high - 0.01, y_high - 0.01, 0.02), use_textures=True, init_camera=sawyer_init_camera_zoomed_in, # preload_obj_dict=[ # dict(color2=(1, 0, 0)), # dict(color2=(0, 1, 0)), # dict(color2=(0, 0, 1)), # dict(color2=(1, 
.4, .7)), # dict(color2=(0, .4, .8)), # dict(color2=(.8, .8, 0)), # dict(color2=(1, .5, 0)), # dict(color2=(.4, 0, .4)), # dict(color2=(.4, .2, 0)), # dict(color2=(0, .4, .4)), # ], ), grill_variant=dict( save_video=True, custom_goal_sampler='replay_buffer', online_vae_trainer_kwargs=dict( beta=20, lr=0, ), save_video_period=50, qf_kwargs=dict( hidden_sizes=[400, 300], ), policy_kwargs=dict( hidden_sizes=[400, 300], ), vf_kwargs=dict( hidden_sizes=[400, 300], ), max_path_length=50, algo_kwargs=dict( batch_size=128, num_epochs=101, num_eval_steps_per_epoch=1000, num_expl_steps_per_train_loop=1000, num_trains_per_train_loop=500, min_num_steps_before_training=1000, vae_training_schedule=vae_schedules.never_train, oracle_data=False, vae_save_period=25, parallel_vae_train=False, dataset_path="/tmp/Multiobj2DWallEnv_N1020_sawyer_init_camera_zoomed_in_imsize48_random_oracle_split_0.npy", rl_offpolicy_num_training_steps=0, ), td3_trainer_kwargs=dict( discount=0.99, # min_num_steps_before_training=4000, reward_scale=1.0, # render=False, tau=1e-2, ), replay_buffer_class=OnlineConditionalVaeRelabelingBuffer, replay_buffer_kwargs=dict( start_skew_epoch=10, max_size=int(100000), fraction_goals_rollout_goals=0.2, fraction_goals_env_goals=0.5, exploration_rewards_type='None', vae_priority_type='vae_prob', priority_function_kwargs=dict( sampling_method='importance_sampling', decoder_distribution='gaussian_identity_variance', # decoder_distribution='bernoulli', num_latents_to_sample=10, ), power=-1, relabeling_goal_sampling_mode='vae_prior', save_decoded_to_internal_keys=False, ), exploration_goal_sampling_mode='vae_prior', evaluation_goal_sampling_mode='reset_of_env', normalize=False, render=False, exploration_noise=0.2, exploration_type='ou', training_mode='train', testing_mode='test', reward_params=dict( epsilon=0.05, ), observation_key='latent_observation', desired_goal_key='latent_desired_goal', vae_wrapped_env_kwargs=dict( sample_from_true_prior=True, ), 
algorithm='ONLINE-VAE-SAC-BERNOULLI', # vae_path="ashvin/corl2019/offpolicy/dcvae2/run0/id0/itr_800.pkl", ), train_vae_variant=dict( # representation_size=4, beta=10, beta_schedule_kwargs=dict( x_values=(0, 1500), y_values=(1, 50), ), num_epochs=10, dump_skew_debug_plots=False, # decoder_activation='gaussian', decoder_activation='sigmoid', use_linear_dynamics=False, generate_vae_dataset_kwargs=dict( N=1020, n_random_steps=51, test_p=.9, use_cached=False, show=False, oracle_dataset=True, oracle_dataset_using_set_to_goal=True, non_presampled_goal_img_is_garbage=False, random_rollout_data=True, conditional_vae_dataset=True, save_trajectories=True, enviorment_dataset=False, ), vae_trainer_class=DeltaCVAETrainer, vae_class=DeltaCVAE, vae_kwargs=dict( input_channels=3, architecture=imsize48_default_architecture_with_more_hidden_layers, decoder_distribution='gaussian_identity_variance', ), # TODO: why the redundancy? algo_kwargs=dict( start_skew_epoch=5000, is_auto_encoder=False, batch_size=32, lr=1e-3, skew_config=dict( method='vae_prob', power=0, ), skew_dataset=False, priority_function_kwargs=dict( decoder_distribution='gaussian_identity_variance', sampling_method='importance_sampling', # sampling_method='true_prior_sampling', num_latents_to_sample=10, ), use_parallel_dataloading=False, ), save_period=25, ), region="us-west-2", logger_variant=dict( tensorboard=True, ), slurm_variant=dict( timeout_min=48 * 60, cpus_per_task=10, gpus_per_node=1, ), ) search_space = { 'seedid': range(5), 'grill_variant.algo_kwargs.rl_offpolicy_num_training_steps': [10, ], # 'grill_variant.reward_params.type':['latent_bound'], #, 'latent_distance' 'train_vae_variant.latent_sizes': [(2, 4)], #(3 * objects, 3 * colors) # 'train_vae_variant.beta': [1], # 'train_vae_variant.generate_vae_dataset_kwargs.n_random_steps': [100] } sweeper = hyp.DeterministicHyperparameterSweeper( search_space, default_parameters=variant, ) variants = [] for variant in sweeper.iterate_hyperparameters(): 
variants.append(variant) run_variants(grill_her_td3_offpolicy_online_vae_full_experiment, variants, run_id=1)
37.995575
124
0.573891
ace7f6c57189a154de750e3dd6cfdbd7e204e294
611
py
Python
openpifpaf_hand/__init__.py
DuncanZauss/openpifpaf_hand
bea3529d46e859060681a4ba180a8e8cee6a3f7b
[ "MIT" ]
null
null
null
openpifpaf_hand/__init__.py
DuncanZauss/openpifpaf_hand
bea3529d46e859060681a4ba180a8e8cee6a3f7b
[ "MIT" ]
null
null
null
openpifpaf_hand/__init__.py
DuncanZauss/openpifpaf_hand
bea3529d46e859060681a4ba180a8e8cee6a3f7b
[ "MIT" ]
null
null
null
import openpifpaf from .freihand import Freihand from .rhd import RHD from .cifonly import CifOnly def register(): openpifpaf.DATAMODULES['freihand'] = Freihand openpifpaf.DATAMODULES['rhd'] = RHD openpifpaf.DECODERS.add(CifOnly) openpifpaf.CHECKPOINT_URLS['shufflenetv2k16-hand'] = 'https://github.com/DuncanZauss/' \ 'openpifpaf_assets/releases/download/v0.1.0/rhd_freihand_sk16.pkl.epoch600' openpifpaf.CHECKPOINT_URLS['shufflenetv2k16-wb-hand'] = 'https://github.com/DuncanZauss/' \ 'openpifpaf_assets/releases/download/v0.1.0/freihand_wholebody_sk16.pkl.epoch600'
38.1875
95
0.754501
ace7f7689f95202d2c7ee370343eec47066dbb76
21,765
py
Python
integration-tests/integration-test-script/world.py
althea-net/althea_rs
49e8e4886a9f40bda8b45afe88cd3862c2fc5c82
[ "Apache-2.0" ]
29
2019-06-28T10:58:28.000Z
2022-03-23T08:07:06.000Z
integration-tests/integration-test-script/world.py
althea-net/althea_rs
49e8e4886a9f40bda8b45afe88cd3862c2fc5c82
[ "Apache-2.0" ]
19
2019-06-23T12:26:14.000Z
2022-03-28T17:58:26.000Z
integration-tests/integration-test-script/world.py
althea-net/althea_rs
49e8e4886a9f40bda8b45afe88cd3862c2fc5c82
[ "Apache-2.0" ]
3
2019-08-09T16:06:54.000Z
2020-11-10T00:27:37.000Z
from pprint import pprint from termcolor import colored import errno import json import os import random import re import shlex import signal import subprocess import sys import time import toml from utils import cleanup from utils import exec_or_exit from utils import exec_no_exit from utils import prep_netns from utils import switch_binaries from utils import start_rita_exit from utils import start_rita from utils import start_babel from utils import get_rita_settings from utils import assert_test from utils import ip_to_num from utils import num_to_ip from utils import fuzzy_traffic_match from utils import fuzzy_match class World: def __init__(self): self.nodes = {} self.connections = {} self.bounty_id = None self.exit_id = None self.external = None def add_node(self, node): assert node.id not in self.nodes self.nodes[node.id] = node def add_exit_node(self, node): assert node.id not in self.nodes self.nodes[node.id] = node self.exit_id = node.id def add_external_node(self, node): assert node.id not in self.nodes self.nodes[node.id] = node self.external = node.id def add_connection(self, connection): connection.canonicalize() self.connections[(connection.a.id, connection.b.id)] = connection connection.a.add_neighbor(connection.b.id) connection.b.add_neighbor(connection.a.id) def to_ip(self, node): if self.exit_id == node.id: return "172.168.1.254" else: return "fd00::{}".format(node.id) def create(self, VERBOSE, COMPAT_LAYOUT, COMPAT_LAYOUTS, RITA, RITA_EXIT, DIR_A, DIR_B, RITA_A, RITA_EXIT_A, RITA_B, RITA_EXIT_B, NETWORK_LAB, BABELD, POSTGRES_DATABASE, POSTGRES_USER, POSTGRES_CONFIG, POSTGRES_BIN, INITDB_BIN, EXIT_NAMESPACE, EXIT_SETTINGS, dname): cleanup() # scale config modifies tests to reduce disk usage and disable # some infeasible tests to allow for larger numbers of nodes scale_configuration = len(self.nodes.items()) > 10 nodes = {} for id in self.nodes: nodes[str(id)] = {"ip": num_to_ip(id)} edges = [] for id, conn in self.connections.items(): 
edges.append({ "nodes": ["{}".format(conn.a.id), "{}".format(conn.b.id)], "->": "", "<-": "" }) network = {"nodes": nodes, "edges": edges} network_string = json.dumps(network) print("network topology: {}".format(network)) print(NETWORK_LAB) proc = subprocess.Popen( ['/bin/bash', NETWORK_LAB], stdin=subprocess.PIPE, universal_newlines=True) proc.stdin.write(network_string) proc.stdin.close() proc.wait() print("network-lab completed") for id in self.nodes: prep_netns(id) print("namespaces prepped") print("Starting postgres in exit namespace") if POSTGRES_DATABASE is not None: exec_or_exit("sudo ip netns exec {} sudo -u {} {} -D {} -c config_file={}".format( EXIT_NAMESPACE, POSTGRES_USER, POSTGRES_BIN, POSTGRES_DATABASE, POSTGRES_CONFIG), False) time.sleep(30) else: exec_no_exit("sudo ip netns exec {} sudo -u {} PGDATA=/var/lib/postgresql/data {}".format( EXIT_NAMESPACE, POSTGRES_USER, INITDB_BIN), True) exec_or_exit("sudo ip netns exec {} sudo -u {} PGDATA=/var/lib/postgresql/data {}".format( EXIT_NAMESPACE, POSTGRES_USER, POSTGRES_BIN), False) time.sleep(30) exec_no_exit("psql -c 'drop database test;' -U postgres", True) exec_no_exit("psql -c 'create database test;' -U postgres", True) print("Perform initial database migrations") exec_or_exit('sudo ip netns exec {} diesel migration run --database-url="postgres://postgres@localhost/test" --migration-dir=../exit_db/migrations'.format(EXIT_NAMESPACE)) # redo the migration so that we can run several times exec_or_exit('sudo ip netns exec {} diesel migration redo --database-url="postgres://postgres@localhost/test" --migration-dir=../exit_db/migrations'.format(EXIT_NAMESPACE)) print("starting babel") # if this is set to zero route checking won't work # but it will reduce logging pressure in large networks log = "1" scale = False if scale_configuration: log = "0" for id, node in self.nodes.items(): start_babel(node, log, scale_configuration, BABELD) print("babel started") (RITA, RITA_EXIT) = switch_binaries(self.exit_id, 
VERBOSE, RITA, RITA_EXIT, COMPAT_LAYOUT, COMPAT_LAYOUTS, RITA_A, RITA_EXIT_A, RITA_B, RITA_EXIT_B) start_rita_exit(self.nodes[self.exit_id], dname, RITA_EXIT) time.sleep(1) self.exit_price = get_rita_settings( self.exit_id)["exit_network"]["exit_price"] EXIT_SETTINGS["exits"]["exit_a"]["wg_public_key"] = get_rita_settings( self.exit_id)["exit_network"]["wg_public_key"] EXIT_SETTINGS["exits"]["exit_a"]["id"]["wg_public_key"] = get_rita_settings( self.exit_id)["exit_network"]["wg_public_key"] EXIT_SETTINGS["exits"]["exit_a"]["payment"] = {} EXIT_SETTINGS["exits"]["exit_a"]["payment"]["eth_address"] = get_rita_settings( self.exit_id)["payment"]["eth_address"] print("starting rita") log = "TRACE" # reduce logging in large configurations to keep disk pressure # from going insane if scale_configuration: log = "ERROR" for id, node in self.nodes.items(): if id != self.exit_id and id != self.external: (RITA, RITA_EXIT) = switch_binaries(id, VERBOSE, RITA, RITA_EXIT, COMPAT_LAYOUT, COMPAT_LAYOUTS, RITA_A, RITA_EXIT_A, RITA_B, RITA_EXIT_B) start_rita(node, dname, log, RITA, EXIT_SETTINGS) time.sleep(0.5 + random.random() / 2) # wait 0.5s - 1s print() print("rita started") def test_reach(self, node_from, node_to, PING6): ping = subprocess.Popen( ["ip", "netns", "exec", "netlab-{}".format(node_from.id), PING6, num_to_ip(node_to.id), "-c", "1"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) output = ping.stdout.read().decode("utf-8") return "1 packets transmitted, 1 received, 0% packet loss" in output def test_exit_reach(self, node, exit_internal_ip): ping = subprocess.Popen( ["ip", "netns", "exec", "netlab-{}".format(node.id), "ping", "{}".format(exit_internal_ip), "-c", "1"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) output = ping.stdout.read().decode("utf-8") return "1 packets transmitted, 1 received, 0% packet loss" in output def test_reach_all(self, PING6, verbose=True, global_fail=True): for i in self.nodes.values(): for j in self.nodes.values(): if not 
assert_test(self.test_reach(i, j, PING6), "Reachability " + "from node {} ({}) to {} ({})".format(i.id, i.revision, j.id, j.revision), verbose=verbose, global_fail=global_fail): return False return True def test_exit_reach_all(self, verbose=True, global_fail=True): exit_internal_ip = get_rita_settings( self.exit_id)["exit_network"]["own_internal_ip"] ret = True for node in self.nodes.values(): if node.id == self.exit_id: continue elif not assert_test(self.test_exit_reach(node, exit_internal_ip), "Exit Reachability " + "from node {} ({})".format(node.id, node.revision), verbose=verbose, global_fail=global_fail): ret = False if global_fail and not ret: exit(1) return ret def test_routes(self, all_routes, verbose=True, global_fail=True): """ Check the presence of all optimal routes. """ result = True # Caution: all_routes directly relies on the layout of the netlab mesh. # # The routes are organized into a dictionary with nodes as keys and # the expected routes as values: # all_routes = { # <where_from>: [ # (<where_to>, <price>, <next_hop>), # [...] # ], # [...] 
# } for node, routes in all_routes.items(): for route in routes: if node.id == route[0].id: continue desc = ("Optimal route from node {} ({}) " + "to {} ({}) with next-hop {} ({}) and price {}").format( node.id, node.revision, route[0].id, route[0].revision, route[2].id, route[2].revision, route[1]) result = result and assert_test(node.has_route(*route, verbose=verbose ), desc, verbose=verbose, global_fail=global_fail) return result def test_endpoints_all(self, VERBOSE): curl_args = "curl -sfg6 --retry 5 -m 60 " for node in self.nodes.values(): # We don't expect the exit to work the same as others if node.id == self.exit_id: # Exit-specific stuff continue print(colored("====== Endpoints for node {} ======".format(node.id), "green")) # /neighbors if VERBOSE: print(colored("Hitting /neighbors:", "green")) result = subprocess.Popen(shlex.split("ip netns exec " + "netlab-{} {} [::1]:4877/neighbors".format(node.id, curl_args)), stdout=subprocess.PIPE) assert_test(not result.wait(), "curl-ing /neighbors") stdout = result.stdout.read().decode('utf-8') try: print("Received neighbors:") if VERBOSE: neighbors = json.loads(stdout) pprint(neighbors) else: print(stdout) except ValueError as e: print('Unable to decode JSON {!r}: {}'.format(stdout, e)) assert_test(False, "Decoding the neighbors JSON") # /exits if VERBOSE: print(colored("Hitting /exits:", "green")) result = subprocess.Popen(shlex.split("ip netns exec " + "netlab-{} {} [::1]:4877/exits".format(node.id, curl_args)), stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert_test(not result.wait(), "curl-ing /exits") stdout = result.stdout.read().decode('utf-8') try: print("Received exits:") if VERBOSE: exits = json.loads(stdout) pprint(exits) else: print(stdout) except ValueError as e: print('Unable to decode JSON {!r}: {}'.format(stdout, e)) assert_test(False, "Decoding the exits JSON") # /info if VERBOSE: print(colored("Hitting /info:", "green")) result = subprocess.Popen(shlex.split("ip netns exec " + "netlab-{} {} 
[::1]:4877/info".format(node.id, curl_args)), stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert_test(not result.wait(), "curl-ing /info") stdout = result.stdout.read().decode('utf-8') try: print("Received info:") if VERBOSE: info = json.loads(stdout) pprint(info) else: print(stdout) except ValueError as e: print('Unable to decode JSON {!r}: {}'.format(stdout, e)) assert_test(False, "Decoding the info JSON") # /settings if VERBOSE: print(colored("Hitting /settings:", "green")) result = subprocess.Popen(shlex.split("ip netns exec " + "netlab-{} {} [::1]:4877/settings".format(node.id, curl_args)), stdout=subprocess.PIPE, stderr=subprocess.PIPE) assert_test(not result.wait(), "curl-ing /settings") stdout = result.stdout.read().decode('utf-8') try: print("Received settings:") if VERBOSE: settings = json.loads(stdout) pprint(settings) else: print(stdout) except ValueError as e: print('Unable to decode JSON {!r}: {}'.format(stdout, e)) assert_test(False, "Decoding the settings JSON") def get_debts(self): """Creates a nested dictionary of balances, for example balances[1][3] is the balance node 1 has for node 3""" status = True balances = {} n = 0 while True: ip = num_to_ip(n) status = subprocess.Popen( ["ip", "netns", "exec", "netlab-{}".format(n), "curl", "-s", "-g", "-6", "[::1]:4877/debts"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) status.wait() output = status.stdout.read().decode("utf-8") # not zero is a hack to deal with the fact that the 7 node format is # one indexed and the arbitrary node one is zero indexed if output == "" and n != 0: break elif output == "" and n == 0: n += 1 continue status = json.loads(output) balances[ip_to_num(ip)] = {} for i in status: peer_ip = i["identity"]["mesh_ip"] peer_debt = int(i["payment_details"]["debt"]) balances[ip_to_num(ip)][ip_to_num(peer_ip)] = peer_debt n += 1 return balances def gen_traffic(self, from_node, to_node, duration, speed): if from_node.id == self.exit_id: server = subprocess.Popen( ["ip", "netns", 
"exec", "netlab-{}".format(from_node.id), "iperf3", "-s", "-V"]) time.sleep(2) client = subprocess.Popen( ["ip", "netns", "exec", "netlab-{}".format(to_node.id), "iperf3", "-c", self.to_ip(from_node), "-V", "-u", "-t {}".format(duration), "-b {}M".format(speed), "-R", ]) else: server = subprocess.Popen( ["ip", "netns", "exec", "netlab-{}".format(to_node.id), "iperf3", "-s", "-V"]) time.sleep(2) client = subprocess.Popen( ["ip", "netns", "exec", "netlab-{}".format(from_node.id), "iperf3", "-c", self.to_ip(to_node), "-V", "-u", "-t {}".format(duration), "-b {}M".format(speed)]) client.wait() server.send_signal(signal.SIGKILL) server.wait() def test_traffic(self, traffic_test_pairs, time, speed): """Generates test traffic from and to the specified nodes, then ensure that all nodes agree""" for (from_node, to_node) in traffic_test_pairs: print("Test traffic...") self.gen_traffic(from_node, to_node, time, speed) def test_debts_values(self, test_traffic_pairs, time, speed, debts, all_routes, exit_id, exit_price): """Uses the traffic pairs and the provided time and speed to compute what every node should owe every other node""" intended_debts = {} # the data transferred by each test, remember some tests overlap in paths # prices are in wei/byte so this is in bytes expected_data_transfer = (time * speed * 1000000) / 8 for (from_node, to_node) in test_traffic_pairs: # exit fees are owed independently of routing to the destination if to_node.id == exit_id or from_node.id == exit_id: exit_node = self.get_node(to_node, from_node, exit_id) other_node = self.get_not_node(to_node, from_node, exit_id) self.init_pair(intended_debts, from_node, to_node) self.init_pair(intended_debts, to_node, from_node) # exit is owed intended_debts[exit_node][other_node] -= exit_price * \ expected_data_transfer # client owes intended_debts[other_node][exit_node] += exit_price * \ expected_data_transfer last_via = from_node via = from_node while True: (via, price) = self.get_best_route(all_routes, 
via, to_node) if via.id == to_node.id: break if last_via.id == exit_id: exit = from_node client = to_node self.init_pair(intended_debts, client, exit) self.init_pair(intended_debts, exit, client) intended_debts[exit][client] -= \ price * expected_data_transfer intended_debts[client][exit] += \ price * expected_data_transfer self.init_pair(intended_debts, last_via, via) self.init_pair(intended_debts, via, last_via) # we add what's owed to the first node, but now we must # follow the entire path adding smaller amounts each time intended_debts[last_via][via] += \ price * expected_data_transfer intended_debts[via][last_via] -= \ price * expected_data_transfer last_via = via for node in intended_debts.keys(): for owed in intended_debts[node].keys(): if node.id not in debts or owed.id not in debts[node.id]: print("Debts map is incomplete! {} Has a predicted debt of {} for {} but no actual debt".format( node.id, intended_debts[node][owed], owed.id)) continue if not fuzzy_match(debts[node.id][owed.id], intended_debts[node][owed]): print("{} has a predicted debt of {} for {} but actual debt is {} {:.2%} accurate".format( node.id, intended_debts[node][owed], owed.id, debts[node.id][owed.id], intended_debts[node][owed]/debts[node.id][owed.id])) # exit(1) def get_best_route(self, all_routes, from_node, target_node): """Very simple utility function to find routes""" best = 99999999 best_route = None for (to, price, via) in all_routes[from_node]: if price < best and to is target_node: best_route = via best = price if best_route is None: print("There's a problem with the provided all_routes values!") exit(1) return (best_route, best) def init_pair(self, d, a, b): """helper function to create zero entires for nested dicts""" if a not in d: d[a] = {} if b not in d[a]: d[a][b] = 0 def get_node(self, a, b, id): if a.id == id: return a elif b.id == id: return b else: exit(1) def get_not_node(self, a, b, id): if a.id != id: return a elif b.id != id: return b else: exit(1) def 
test_debts_reciprocal_matching(self, debts): """Tests that in a network nodes generally agree on debts, within a few percent this is done by making sure that debts[1][3] is within a few percent of debts[3][1]""" for node in debts.keys(): for node_to_compare in debts[node].keys(): if node_to_compare not in debts: print("{} is not in the debts list".format( node_to_compare)) continue elif node not in debts[node_to_compare]: print("Node {} has a debt for Node {} but not the other way around!".format( node, node_to_compare)) continue res = fuzzy_traffic_match( debts[node][node_to_compare], debts[node_to_compare][node]) if not res: print("Nodes {} and {} do not agree! {} has {} and {} has {}!".format( node, node_to_compare, node, debts[node][node_to_compare], node_to_compare, debts[node_to_compare][node])) # exit(1) print("All debts match their reciprocal!")
42.509766
270
0.537009
ace7f7b662dfcf2f8b7d7518680d629b2141d363
8,730
py
Python
keyboard_and_video.py
aescay/tello_experiments
498b68a35a543d77c67e25e8ad721d4aae83a94c
[ "MIT" ]
1
2020-08-16T21:54:41.000Z
2020-08-16T21:54:41.000Z
keyboard_and_video.py
aescay/tello_experiments
498b68a35a543d77c67e25e8ad721d4aae83a94c
[ "MIT" ]
null
null
null
keyboard_and_video.py
aescay/tello_experiments
498b68a35a543d77c67e25e8ad721d4aae83a94c
[ "MIT" ]
null
null
null
# Test script taken from the tellopy package experiments """ tellopy sample using keyboard and video player Requires mplayer to record/save video. Controls: - tab to lift off - WASD to move the drone - space/shift to ascend/descent slowly - Q/E to yaw slowly - arrow keys to ascend, descend, or yaw quickly - backspace to land, or P to palm-land - enter to take a picture - R to start recording video, R again to stop recording (video and photos will be saved to a timestamped file in ~/Pictures/) - Z to toggle camera zoom state (zoomed-in widescreen or high FOV 4:3) """ import time import sys import tellopy import pygame import pygame.display import pygame.key import pygame.locals import pygame.font import os import datetime from subprocess import Popen, PIPE # from tellopy import logger # log = tellopy.logger.Logger('TelloUI') prev_flight_data = None video_player = None video_recorder = None font = None wid = None date_fmt = '%Y-%m-%d_%H%M%S' def toggle_recording(drone, speed): global video_recorder global date_fmt if speed == 0: return if video_recorder: # already recording, so stop video_recorder.stdin.close() status_print('Video saved to %s' % video_recorder.video_filename) video_recorder = None return # start a new recording filename = '%s/Pictures/tello-%s.mp4' % (os.getenv('HOME'), datetime.datetime.now().strftime(date_fmt)) video_recorder = Popen([ 'mencoder', '-', '-vc', 'x264', '-fps', '30', '-ovc', 'copy', '-of', 'lavf', '-lavfopts', 'format=mp4', # '-ffourcc', 'avc1', # '-really-quiet', '-o', filename, ], stdin=PIPE) video_recorder.video_filename = filename status_print('Recording video to %s' % filename) def take_picture(drone, speed): if speed == 0: return drone.take_picture() def palm_land(drone, speed): if speed == 0: return drone.palm_land() def toggle_zoom(drone, speed): # In "video" mode the drone sends 1280x720 frames. # In "photo" mode it sends 2592x1936 (952x720) frames. # The video will always be centered in the window. 
# In photo mode, if we keep the window at 1280x720 that gives us ~160px on # each side for status information, which is ample. # Video mode is harder because then we need to abandon the 16:9 display size # if we want to put the HUD next to the video. if speed == 0: return drone.set_video_mode(not drone.zoom) pygame.display.get_surface().fill((0,0,0)) pygame.display.flip() controls = { 'w': 'forward', 's': 'backward', 'a': 'left', 'd': 'right', 'space': 'up', 'left shift': 'down', 'right shift': 'down', 'q': 'counter_clockwise', 'e': 'clockwise', # arrow keys for fast turns and altitude adjustments 'left': lambda drone, speed: drone.counter_clockwise(speed*2), 'right': lambda drone, speed: drone.clockwise(speed*2), 'up': lambda drone, speed: drone.up(speed*2), 'down': lambda drone, speed: drone.down(speed*2), 'tab': lambda drone, speed: drone.takeoff(), 'backspace': lambda drone, speed: drone.land(), 'p': palm_land, 'r': toggle_recording, 'z': toggle_zoom, 'enter': take_picture, 'return': take_picture, } class FlightDataDisplay(object): # previous flight data value and surface to overlay _value = None _surface = None # function (drone, data) => new value # default is lambda drone,data: getattr(data, self._key) _update = None def __init__(self, key, format, colour=(255,255,255), update=None): self._key = key self._format = format self._colour = colour if update: self._update = update else: self._update = lambda drone,data: getattr(data, self._key) def update(self, drone, data): new_value = self._update(drone, data) if self._value != new_value: self._value = new_value self._surface = font.render(self._format % (new_value,), True, self._colour) return self._surface def flight_data_mode(drone, *args): return (drone.zoom and "VID" or "PIC") def flight_data_recording(*args): return (video_recorder and "REC 00:00" or "") # TODO: duration of recording def update_hud(hud, drone, flight_data): (w,h) = (158,0) # width available on side of screen in 4:3 mode blits = [] for 
element in hud: surface = element.update(drone, flight_data) if surface is None: continue blits += [(surface, (0, h))] # w = max(w, surface.get_width()) h += surface.get_height() h += 64 # add some padding overlay = pygame.Surface((w, h), pygame.SRCALPHA) overlay.fill((0,0,0)) # remove for mplayer overlay mode for blit in blits: overlay.blit(*blit) pygame.display.get_surface().blit(overlay, (0,0)) pygame.display.update(overlay.get_rect()) def status_print(text): pygame.display.set_caption(text) hud = [ FlightDataDisplay('height', 'ALT %3d'), FlightDataDisplay('ground_speed', 'SPD %3d'), FlightDataDisplay('battery_percentage', 'BAT %3d%%'), FlightDataDisplay('wifi_strength', 'NET %3d%%'), FlightDataDisplay(None, 'CAM %s', update=flight_data_mode), FlightDataDisplay(None, '%s', colour=(255, 0, 0), update=flight_data_recording), ] def flightDataHandler(event, sender, data): global prev_flight_data text = str(data) if prev_flight_data != text: update_hud(hud, sender, data) prev_flight_data = text def videoFrameHandler(event, sender, data): global video_player global video_recorder if video_player is None: cmd = [ 'mplayer', '-fps', '35', '-really-quiet' ] if wid is not None: cmd = cmd + [ '-wid', str(wid) ] video_player = Popen(cmd + ['-'], stdin=PIPE) try: video_player.stdin.write(data) except IOError as err: status_print(str(err)) video_player = None try: if video_recorder: video_recorder.stdin.write(data) except IOError as err: status_print(str(err)) video_recorder = None def handleFileReceived(event, sender, data): global date_fmt # Create a file in ~/Pictures/ to receive image data from the drone. 
path = '%s/Pictures/tello-%s.jpeg' % ( os.getenv('HOME'), datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')) with open(path, 'wb') as fd: fd.write(data) status_print('Saved photo to %s' % path) def main(): pygame.init() pygame.display.init() pygame.display.set_mode((1280, 720)) pygame.font.init() global font font = pygame.font.SysFont("dejavusansmono", 32) global wid if 'window' in pygame.display.get_wm_info(): wid = pygame.display.get_wm_info()['window'] print("Tello video WID:", wid) drone = tellopy.Tello() drone.connect() drone.start_video() drone.subscribe(drone.EVENT_FLIGHT_DATA, flightDataHandler) drone.subscribe(drone.EVENT_VIDEO_FRAME, videoFrameHandler) drone.subscribe(drone.EVENT_FILE_RECEIVED, handleFileReceived) speed = 30 try: while 1: time.sleep(0.01) # loop with pygame.event.get() is too mush tight w/o some sleep for e in pygame.event.get(): # WASD for movement if e.type == pygame.locals.KEYDOWN: print('+' + pygame.key.name(e.key)) keyname = pygame.key.name(e.key) if keyname == 'escape': drone.quit() exit(0) if keyname in controls: key_handler = controls[keyname] if type(key_handler) == str: getattr(drone, key_handler)(speed) else: key_handler(drone, speed) elif e.type == pygame.locals.KEYUP: print('-' + pygame.key.name(e.key)) keyname = pygame.key.name(e.key) if keyname in controls: key_handler = controls[keyname] if type(key_handler) == str: getattr(drone, key_handler)(0) else: key_handler(drone, 0) except e: print(str(e)) finally: print('Shutting down connection to drone...') if video_recorder: toggle_recording(drone, 1) drone.quit() exit(1) if __name__ == '__main__': main()
31.861314
93
0.608133
ace7faa8a54bac5aab1b90359360338750448ecd
92,970
py
Python
theano/tensor/blas.py
JimmyRetza/Theano
72d83bce0d547d54ab3513bcba35c166979f7a6f
[ "BSD-3-Clause" ]
9
2018-10-29T20:25:25.000Z
2021-11-17T11:03:17.000Z
theano/tensor/blas.py
JimmyRetza/Theano
72d83bce0d547d54ab3513bcba35c166979f7a6f
[ "BSD-3-Clause" ]
null
null
null
theano/tensor/blas.py
JimmyRetza/Theano
72d83bce0d547d54ab3513bcba35c166979f7a6f
[ "BSD-3-Clause" ]
1
2018-04-06T08:31:11.000Z
2018-04-06T08:31:11.000Z
"""Ops and optimizations for using BLAS calls BLAS = Basic Linear Algebra Subroutines Learn more about BLAS here: http://www.netlib.org/blas/blast-forum/ The standard BLAS libraries implement what is called "legacy BLAS" in that document. This documentation describes Theano's BLAS optimization pipeline. Where there is a discrepancy between how things do work and how they *should* work, both aspects should be documented. There are four kinds of BLAS Ops in Theano: - Python implementations (this file) - SciPy-based (blas_scipy) - C-based (blas_c) - GPU-based (theano.gpuarray) Notes ----- Unfortunately (because it's confusing) this file currently contains Ops that contain both Python and C versions. I think it would be better to move the C implementations to blas_c so that this file is pure Python. -JB Ops === GEMM: Dot22, Dot22Scalar, GemmRelated, Gemm ------------------------------------------- The BLAS GEMM operation implements Z <- a X Y + b Z, where Z, X and Y are matrices, and a and b are scalars. Dot22 is a GEMM where a=1, b=0, and Z is allocated every time. Dot22Scalar is a GEMM where b=0 and Z is allocated every time. Gemm is a GEMM in all its generality. In the future we can refactor the GemmRelated, Gemm, Dot22 and Dot22Scalar Ops into a single Op. That new Op (Gemm2) is basically a normal Gemm, but with an additional configuration variable that says to ignore the input Z. Setting that configuration variable to True would make Gemm2 equivalent to the current Dot22 and Dot22Scalar. This would make the file a lot easier to read, and save a few hundred lines of library, to say nothing of testing and documentation. GEMV: Gemv ---------- The BLAS GEMV operation implements Z <- a X Y + b Z, where X is a matrix, Y, and Z are vectors, and a and b are scalars. GER: Ger -------- The BLAS GER operation implements Z <- a X' Y + Z, where X and Y are vectors, and matrix Z gets a rank-1 update. 
Other Notable BLAS-related Ops ------------------------------ SYRK is another useful special case of GEMM. Particularly SYRK preserves symmetry in the matrix that it updates. See how the linear-algebra module uses symmetry hints before implementing this Op, so that this Op is compatible with that system. Optimizations ============= The optimization pipeline works something like this: 1. identify dot22 from dot 2. identify gemm from dot22 3. identify dot22scalar from dot22 that are not gemm 4. specialize gemm to gemv where applicable 5. specialize gemm to ger where applicable 6. specialize dot22 -> gemv or ger where applicable :note: GEMM is the most canonical BLAS signature that we deal with so far, it would be good to turn most things into GEMM (dot, inner, outer, dot22, dot22scalar), and then to specialize from gemm to the various other L2 and L3 operations. Identify Dot22 -------------- Numpy's dot supports arguments that are of any rank, and we should support that too (just for compatibility). The BLAS optimizations work with Dot Ops whose inputs are each either vector or matrix. So the first part of the optimization pipeline is to transform qualifying Dot Ops to Dot22 Ops. Dot22 Ops may be transformed further, but they will get implemented by a BLAS call. More precisely, Dot nodes whose inputs are all vectors or matrices and whose inputs both have the same dtype, and whose dtype is float or complex, become Dot22. This is implemented in `local_dot_to_dot22`. Identify Gemm from Dot22 ------------------------ This is complicated, done in GemmOptimizer. Identify Dot22Scalar from Dot22 ------------------------------- Dot22 Ops that remain after the GemmOptimizer is done have not qualified as GEMM Ops. Still they might be scaled by a factor, in which case we use Dot22Scalar which is like Gemm, but without the b and the Z. In the future it would be good to merge this into the GemmOptimizer. 
Specialize Gemm to Gemv ----------------------- If arguments to GEMM are dimshuffled vectors, then we can use GEMV instead. This optimization is `local_gemm_to_gemv`. """ from __future__ import absolute_import, print_function, division import copy import logging import os import time import numpy as np import numpy.distutils try: import numpy.distutils.__config__ # noqa except ImportError: pass from six import iteritems from six.moves import reduce, xrange from theano import config from theano.gof import (utils, Op, view_roots, local_optimizer, Optimizer, InconsistencyError, toolbox, SequenceDB, EquilibriumOptimizer, Apply, ReplacementDidntRemovedError) from theano.gof.params_type import ParamsType from theano.gof.opt import inherit_stack_trace from theano.printing import pprint, FunctionPrinter, debugprint from theano.compile.mode import optdb import theano.scalar from theano.scalar import bool as bool_t from theano.tensor import basic as T from theano.tensor.blas_headers import blas_header_text from theano.tensor.blas_headers import blas_header_version from theano.tensor.opt import in2out, local_dimshuffle_lift from theano.tensor.type import values_eq_approx_remove_inf_nan _logger = logging.getLogger('theano.tensor.blas') try: import scipy.linalg.blas have_fblas = True try: fblas = scipy.linalg.blas.fblas except AttributeError: # A change merged in Scipy development version on 2012-12-02 replaced # `scipy.linalg.blas.fblas` with `scipy.linalg.blas`. # See http://github.com/scipy/scipy/pull/358 fblas = scipy.linalg.blas _blas_gemv_fns = {np.dtype('float32'): fblas.sgemv, np.dtype('float64'): fblas.dgemv, np.dtype('complex64'): fblas.cgemv, np.dtype('complex128'): fblas.zgemv} except ImportError as e: have_fblas = False # This is used in Gemv and ScipyGer. We use CGemv and CGer # when theano.config.blas.ldflags is defined. So we don't need a # warning in that case. 
if not config.blas.ldflags: _logger.warning('Failed to import scipy.linalg.blas, and ' 'Theano flag blas.ldflags is empty. ' 'Falling back on slower implementations for ' 'dot(matrix, vector), dot(vector, matrix) and ' 'dot(vector, vector) (%s)', str(e)) # If check_init_y() == True we need to initialize y when beta == 0. def check_init_y(): if check_init_y._result is None: if not have_fblas: check_init_y._result = False y = float('NaN') * np.ones((2,)) x = np.ones((2,)) A = np.ones((2, 2)) gemv = _blas_gemv_fns[y.dtype] gemv(1.0, A.T, x, 0.0, y, overwrite_y=True, trans=True) check_init_y._result = np.isnan(y).any() return check_init_y._result check_init_y._result = None class Gemv(Op): """ expression is beta * y + alpha * A x A is matrix x, y are vectors alpha, beta are scalars output is a vector that can be inplace on y """ __props__ = ("inplace",) def __init__(self, inplace): self.inplace = inplace if inplace: self.destroy_map = {0: [0]} def __str__(self): if self.inplace: return '%s{inplace}' % self.__class__.__name__ else: return '%s{no_inplace}' % self.__class__.__name__ def make_node(self, y, alpha, A, x, beta): y = T.as_tensor_variable(y) x = T.as_tensor_variable(x) A = T.as_tensor_variable(A) alpha = T.as_tensor_variable(alpha) beta = T.as_tensor_variable(beta) if y.dtype != A.dtype or y.dtype != x.dtype: raise TypeError('Gemv requires matching dtypes', (y.dtype, A.dtype, x.dtype)) if A.ndim != 2: raise TypeError('gemv requires matrix for A', A.type) if x.ndim != 1: raise TypeError('gemv requires vector for x', x.type) if y.ndim != 1: raise TypeError('gemv requires vector for y', y.type) return Apply(self, [y, alpha, A, x, beta], [y.type()]) def perform(self, node, inputs, out_storage, params=None): y, alpha, A, x, beta = inputs if (have_fblas and y.shape[0] != 0 and x.shape[0] != 0 and y.dtype in _blas_gemv_fns): gemv = _blas_gemv_fns[y.dtype] if (A.shape[0] != y.shape[0] or A.shape[1] != x.shape[0]): raise ValueError( 'Incompatible shapes for gemv ' 
'(beta * y + alpha * dot(A, x)). y: %s, A: %s, x: %s ' % (y.shape, A.shape, x.shape)) if beta == 0 and check_init_y(): y.fill(0) # Here I suppose that A is in c order. If we don't make it # explicitly as fortran order, scipy 0.7.2 seam to create # a copy in fortran order instead of just reshaping it # and using the trans flag. # If A is already in fortran order, make it in c order and using the # trans flag don't seam to cause slowdown. # out_storage[0][0] = gemv(alpha, A, x, beta, y, # overwrite_y=self.inplace) out_storage[0][0] = gemv(alpha, A.T, x, beta, y, overwrite_y=self.inplace, trans=True) else: out = np.dot(A, x) if alpha != 1: out *= alpha if beta != 0: if beta != 1: out += beta * y else: out += y out_storage[0][0] = np.asarray(out, dtype=y.dtype) def infer_shape(self, node, input_shapes): return [input_shapes[0]] gemv_no_inplace = Gemv(inplace=False) gemv_inplace = Gemv(inplace=True) # For the user interface. Opt will make them inplace later gemv = gemv_no_inplace class Ger(Op): """ BLAS defines general rank-1 update GER as A <- A + alpha x y' for matrix A, scalar alpha, vectors x and y. This interface to GER allows non-destructive operation on A via the `destructive` argument to the constructor. 
""" __props__ = ("destructive",) def __init__(self, destructive): self.destructive = destructive if destructive: self.destroy_map = {0: [0]} def __str__(self): if self.destructive: return '%s{destructive}' % self.__class__.__name__ else: return '%s{non-destructive}' % self.__class__.__name__ def make_node(self, A, alpha, x, y): A = T.as_tensor_variable(A) y = T.as_tensor_variable(y) x = T.as_tensor_variable(x) alpha = T.as_tensor_variable(alpha) if not(A.dtype == x.dtype == y.dtype == alpha.dtype): raise TypeError('ger requires matching dtypes', (A.dtype, alpha.dtype, x.dtype, y.dtype)) if alpha.ndim != 0: raise TypeError('ger requires scalar alpha', alpha.type) if A.ndim != 2: raise TypeError('ger requires matrix for A', A.type) if x.ndim != 1: raise TypeError('ger requires vector for x', x.type) if y.ndim != 1: raise TypeError('ger requires vector for y', y.type) if x.dtype not in ('float32', 'float64', 'complex64', 'complex128'): raise TypeError('only float and complex types supported', x.dtype) return Apply(self, [A, alpha, x, y], [A.type()]) def perform(self, node, inp, out, params=None): cA, calpha, cx, cy = inp cZ, = out if self.destructive: A = cA else: A = cA.copy() if calpha != 1: A += calpha * np.outer(cx, cy) else: A += np.outer(cx, cy) cZ[0] = A def infer_shape(self, node, input_shapes): return [input_shapes[0]] ger = Ger(destructive=False) ger_destructive = Ger(destructive=True) def ldflags(libs=True, flags=False, libs_dir=False, include_dir=False): """Extract a list of compilation flags from config.blas.ldflags. Depending on the options, different type of flags will be kept. It returns a list of libraries against which an Op's object file should be linked to benefit from a BLAS implementation. Parameters ---------- libs : bool, optional Extract flags starting with "-l" (the default is True). libs_dir : bool, optional Extract flags starting with "-L" (the default is False). 
include_dir : bool, optional Extract flags starting with "-I" (the default is False). flags: bool, optional Extract all the other flags (the default is False). Returns ------- list of strings Extracted flags. """ ldflags_str = theano.config.blas.ldflags return _ldflags(ldflags_str=ldflags_str, libs=libs, flags=flags, libs_dir=libs_dir, include_dir=include_dir) @utils.memoize def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir): """Extract list of compilation flags from a string. Depending on the options, different type of flags will be kept. Parameters ---------- ldflags_str : string The string to process. Typically, this will be the content of `theano.config.blas.ldflags`. libs : bool Extract flags starting with "-l". flags: bool Extract all the other flags. libs_dir: bool Extract flags starting with "-L". include_dir: bool Extract flags starting with "-I". Returns ------- list of strings Extracted flags. """ rval = [] if libs_dir: found_dyn = False dirs = [x[2:] for x in ldflags_str.split() if x.startswith('-L')] l = _ldflags(ldflags_str=ldflags_str, libs=True, flags=False, libs_dir=False, include_dir=False) for d in dirs: for f in os.listdir(d.strip('"')): if (f.endswith('.so') or f.endswith('.dylib') or f.endswith('.dll')): if any([f.find(ll) >= 0 for ll in l]): found_dyn = True if not found_dyn and dirs: _logger.warning( "We did not find a dynamic library in the " "library_dir of the library we use for blas. If you use " "ATLAS, make sure to compile it with dynamics library.") for t in ldflags_str.split(): # Remove extra quote. if (t.startswith("'") and t.endswith("'")) or (t.startswith('"') and t.endswith('"')): t = t[1:-1] try: t0, t1, t2 = t[0:3] assert t0 == '-' except Exception: raise ValueError('invalid token "%s" in ldflags_str: "%s"' % (t, ldflags_str)) if libs_dir and t1 == 'L': rval.append(t[2:]) elif include_dir and t1 == 'I': raise ValueError('Include dirs are not used for blas. 
We disable' ' this as this can hide other headers and this' ' is not wanted.', t) rval.append(t[2:]) elif libs and t1 == 'l': # example -lmkl rval.append(t[2:]) elif flags and t1 not in ['L', 'I', 'l']: # example -openmp rval.append(t) elif flags and t1 == 'L': # to find it when we load the compiled op if the env of the # used is not well configured. rval.append('-Wl,-rpath,' + t[2:]) return rval class GemmRelated(Op): """Base class for Gemm and Dot22. This class provides a kind of templated gemm Op. """ __props__ = () def c_support_code(self): # return cblas_header_text() mod_str = """ #ifndef MOD #define MOD % #endif static double time_time() // a time function like time.time() { struct timeval tv; gettimeofday(&tv, 0); return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0; } """ return blas_header_text() + mod_str def c_headers(self): # std.cout doesn't require the '%' symbol to print stuff... # so it works much better with python's string-substitution stuff. return ['<iostream>', '<time.h>', '<sys/time.h>'] def c_libraries(self): return ldflags() # code_cache_version is built by subclasses from # build_gemm_version def c_compile_args(self): return ldflags(libs=False, flags=True) def c_lib_dirs(self): return ldflags(libs=False, libs_dir=True) def c_header_dirs(self): return ldflags(libs=False, include_dir=True) declare_NS = """ int unit = 0; int type_num = PyArray_DESCR(%(_x)s)->type_num; int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes npy_intp* Nx = PyArray_DIMS(%(_x)s); npy_intp* Ny = PyArray_DIMS(%(_y)s); npy_intp* Nz = 0; //PyArray_DIMS(%(_zout)s); npy_intp* Sx = PyArray_STRIDES(%(_x)s); npy_intp* Sy = PyArray_STRIDES(%(_y)s); npy_intp* Sz = 0; //PyArray_STRIDES(%(_zout)s); //strides for x, y, z in dimensions 0, 1 int sx_0, sx_1, sy_0, sy_1, sz_0, sz_1; """ # implement if you don't have an inplace props # setup_z_Nz_Sz = None # otherwise implement # setup_z_Nz_Sz_inplace = None # setup_z_Nz_Sz_outplace = None check_xyz_rank2 = """ if 
        (PyArray_NDIM(%(_x)s) != 2)
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "rank(x) != 2. rank(x) is %%d.",
                         PyArray_NDIM(%(_x)s));
            %(fail)s;
        }
        if (PyArray_NDIM(%(_y)s) != 2)
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "rank(y) != 2. rank(y) is %%d.",
                         PyArray_NDIM(%(_y)s));
            %(fail)s;
        }
        if (%(_zout)s && PyArray_NDIM(%(_zout)s) != 2)
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "rank(z) != 2. rank(z) is %%d.",
                         PyArray_NDIM(%(_zout)s));
            %(fail)s;
        }
        """

    check_xyz_double_or_float = """
        if ((PyArray_DESCR(%(_x)s)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(%(_x)s)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError,
                         "type(x) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(%(_y)s)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(%(_y)s)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError,
                         "type(y) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(%(_zout)s)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(%(_zout)s)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError,
                         "type(z) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_y)s)->type_num)
            ||(PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_zout)s)->type_num))
        { PyErr_SetString(PyExc_NotImplementedError,
                          "type(x), type(y), type(z) are not all the same");
          %(fail)s; }
        """

    # it is not necessary that a or b have the same type as x,y,z
    check_ab_double_or_float = """
        if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError,
                         "type(a) is not double or float"); %(fail)s;}

        if ((PyArray_DESCR(%(_b)s)->type_num != NPY_DOUBLE)
            && (PyArray_DESCR(%(_b)s)->type_num != NPY_FLOAT))
        {PyErr_SetString(PyExc_NotImplementedError,
                         "type(b) is not double or float"); %(fail)s;}
        """

    check_dims = """
        if (Nx[0] != Nz[0])
        {
            PyErr_Format(PyExc_ValueError,
                "Shape mismatch: x has %%ld rows but z has %%ld rows",
                (long int)Nx[0], (long int)Nz[0]);
            %(fail)s;
        }
        if (Nx[1] != Ny[0])
        {
            PyErr_Format(PyExc_ValueError,
                "Shape mismatch: x has %%ld cols (and %%ld rows) but y has %%ld rows (and %%ld cols)",
                (long int)Nx[1], (long int)Nx[0], (long int)Ny[0],
                (long int)Ny[1]);
            %(fail)s;
        }
        if (Ny[1] != Nz[1])
        {
            PyErr_Format(PyExc_ValueError,
                "Shape mismatch: y has %%ld cols but z has %%ld cols",
                (long int)Ny[1], (long int)Nz[1]);
            %(fail)s;
        }

        // We must not raise an error when Nx[1] == 0. This would disable cases
        // that numpy.dot accept.
        """

    check_strides = """
        /*
        If some matrices are not contiguous on either dimensions,
        or have invalid strides, copy their content into a contiguous one
        */
        if ((Sx[0] < 1) || (Sx[1] < 1) || (Sx[0] MOD type_size) || (Sx[1] MOD type_size)
            || ((Sx[0] != type_size) && (Sx[1] != type_size)))
        {
            PyArrayObject * _x_copy = (PyArrayObject *) PyArray_Copy(%(_x)s);
            if (!_x_copy)
                %(fail)s
            Py_XDECREF(%(_x)s);
            %(_x)s = _x_copy;
            Sx = PyArray_STRIDES(%(_x)s);
        }

        if ((Sy[0] < 1) || (Sy[1] < 1) || (Sy[0] MOD type_size) || (Sy[1] MOD type_size)
            || ((Sy[0] != type_size) && (Sy[1] != type_size)))
        {
            PyArrayObject * _y_copy = (PyArrayObject *) PyArray_Copy(%(_y)s);
            if (!_y_copy)
                %(fail)s
            Py_XDECREF(%(_y)s);
            %(_y)s = _y_copy;
            Sy = PyArray_STRIDES(%(_y)s);
        }

        if ((Sz[0] < 1) || (Sz[1] < 1) || (Sz[0] MOD type_size) || (Sz[1] MOD type_size)
            || ((Sz[0] != type_size) && (Sz[1] != type_size)))
        {
            PyArrayObject * _z_copy = (PyArrayObject *) PyArray_Copy(%(_zout)s);
            if (!_z_copy)
                %(fail)s
            Py_XDECREF(%(_zout)s);
            %(_zout)s = _z_copy;
            Sz = PyArray_STRIDES(%(_zout)s);
        }
        """

    encode_strides_in_unit = """
        /*
        encode the stride structure of _x,_y,_zout into a single integer
        */
        unit |= ((Sx[1] == type_size || Nx[1]==1) ? 0x0 : (Sx[0] == type_size || Nx[0]==1) ? 0x1 : 0x2) << 8;
        unit |= ((Sy[1] == type_size || Ny[1]==1) ? 0x0 : (Sy[0] == type_size || Ny[0]==1) ? 0x1 : 0x2) << 4;
        unit |= ((Sz[1] == type_size || Nz[1]==1) ? 0x0 : (Sz[0] == type_size || Nz[0]==1) ? 0x1 : 0x2) << 0;
        """

    compute_strides = """
        /* create appropriate strides for malformed matrices that are row or column
         * vectors, or empty matrices.
         * In that case, the value of the stride does not really matter, but
         * some versions of BLAS insist that:
         *  - they are not smaller than the number of elements in the array,
         *  - they are not 0.
         */
        sx_0 = (Nx[0] > 1) ? Sx[0]/type_size : (Nx[1] + 1);
        sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[0] + 1);
        sy_0 = (Ny[0] > 1) ? Sy[0]/type_size : (Ny[1] + 1);
        sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[0] + 1);
        sz_0 = (Nz[0] > 1) ? Sz[0]/type_size : (Nz[1] + 1);
        sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[0] + 1);
        """

    begin_switch_typenum = """
        switch (type_num)
        {
        """

    case_float = """
            case NPY_FLOAT:
            {
        """

    # case_float_ab_constants = None

    # The switch(unit) below selects the sgemm_ transpose/argument-order
    # combination matching the stride structure encoded above.
    case_float_gemm = """
                float* x = (float*)PyArray_DATA(%(_x)s);
                float* y = (float*)PyArray_DATA(%(_y)s);
                float* z = (float*)PyArray_DATA(%(_zout)s);
                char N = 'N';
                char T = 'T';
                int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
                //std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
                //double t0 = time_time();
                switch(unit)
                {
                    case 0x000: sgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_0, &b, z, &sz_0); break;
                    case 0x100: sgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_1, &b, z, &sz_0); break;
                    case 0x010: sgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_0, &b, z, &sz_0); break;
                    case 0x110: sgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_1, &b, z, &sz_0); break;
                    case 0x001: sgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_0, &b, z, &sz_1); break;
                    case 0x101: sgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_0, &b, z, &sz_1); break;
                    case 0x011: sgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_1, &b, z, &sz_1); break;
                    case 0x111: sgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_1, &b, z, &sz_1); break;
                    default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); %(fail)s;
                };
                //fprintf(stderr, "Calling sgemm %%i %%i %%i %%i took %%f\\n", unit, Nz1, Nz0, Nx1, time_time() - t0);
        """

    case_double = """
            }
            break;
            case NPY_DOUBLE:
            {
        """

    # case_double_ab_constants = None

    case_double_gemm = """
                double* x = (double*)PyArray_DATA(%(_x)s);
                double* y = (double*)PyArray_DATA(%(_y)s);
                double* z = (double*)PyArray_DATA(%(_zout)s);
                char N = 'N';
                char T = 'T';
                int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
                //std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
                //double t0 = time_time();
                //fprintf(stderr, "unit=%%x N= %%i %%i %%i S = %%i %%i %%i %%i %%i %%i\\n", unit,
                //Nz1, Nz0, Nx1,
                //sy_0, sy_1,
                //sx_0, sx_1,
                //sz_0, sz_1
                //);
                switch(unit)
                {
                    case 0x000: dgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_0, &b, z, &sz_0); break;
                    case 0x100: dgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_1, &b, z, &sz_0); break;
                    case 0x010: dgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_0, &b, z, &sz_0); break;
                    case 0x110: dgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_1, &b, z, &sz_0); break;
                    case 0x001: dgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_0, &b, z, &sz_1); break;
                    case 0x101: dgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_0, &b, z, &sz_1); break;
                    case 0x011: dgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_1, &b, z, &sz_1); break;
                    case 0x111: dgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_1, &b, z, &sz_1); break;
                    default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); %(fail)s;
                };
                //fprintf(stderr, "Calling dgemm %%i %%i %%i %%i took %%f\\n",
                //        unit, Nz1, Nz0, Nx1, time_time()- t0);
        """

    end_switch_typenum = """
            }
            break;
        }
        """

    def build_gemm_call(self):
        # Concatenate the shared C fragments into the full implementation,
        # dispatching on the 'inplace' attribute when the subclass has one.
        if hasattr(self, 'inplace'):
            setup_z_Nz_Sz = "if(%%(params)s->inplace){%s}else{%s}" % (
                self.setup_z_Nz_Sz_inplace, self.setup_z_Nz_Sz_outplace)
        else:
            setup_z_Nz_Sz = self.setup_z_Nz_Sz

        return reduce(str.__add__, (
            self.declare_NS,
            self.check_xyz_rank2,
            setup_z_Nz_Sz,
            self.check_xyz_double_or_float,
            self.check_ab_double_or_float,
            self.check_dims,
            self.check_strides,
self.encode_strides_in_unit, self.compute_strides, self.begin_switch_typenum, self.case_float, self.case_float_ab_constants, self.case_float_gemm, self.case_double, self.case_double_ab_constants, self.case_double_gemm, self.end_switch_typenum), '') def build_gemm_version(self): return (13, blas_header_version()) class Gemm(GemmRelated): """In-place version of matrix-matrix multiplication (with accumulation). When a and b are scalars and x, y, and z are matrices, then gemm(z,a,x,y,b) is similar to b*z + a*dot(x,y) The difference between the two is that the top form is destructive on z, whereas the bottom form is not. Gemm works in-place on the storage associated with z, and the L{Variable} returned by Gemm has a storage that will be aliased to the storage of the z argument. Because of this in-place computation, an L{Apply} of this op will destroy the L{Variable} z on which it operates. (See L{DestructiveOps} for an explanation of what destroying means in the context of theano graphs. See L{BlasLapackSupport} for more optimized linear algebra operations.) 
""" E_rank = 'gemm only works for rank 2' E_scalar = 'gemm requires scalar argument' E_z_uniq = 'argument z aliased to x or y' # TODO: justify / delete this E_mixed = 'gemm requires matching dtypes' E_float = 'gemm requires floating-point dtypes' __props__ = ('inplace',) params_type = ParamsType(inplace=bool_t,) check_input = False def __init__(self, inplace): self.inplace = inplace if self.inplace: self.destroy_map = {0: [0]} def __str__(self): if self.inplace: inplace_str = 'inplace' else: inplace_str = 'no_inplace' return '%s{%s}' % (self.__class__.__name__, inplace_str) def __setstate__(self, dct): self.__dict__.update(dct) # Correctly reload older pickles where destroy_map were not # saved if 'destroy_map' not in self.__dict__ and self.inplace: self.destroy_map = {0: [0]} def __getstate__(self): rval = self.__dict__.copy() # Do not serialize the setup code, it will be restored in __setstate__ # depending on the value of 'inplace' rval.pop('setup_z_Nz_Sz', None) return rval def make_node(self, *inputs): inputs = list(map(T.as_tensor_variable, inputs)) if len(inputs) != 5: raise TypeError( "Wrong number of inputs for %s (expected 5, got %s)" % (self, len(inputs))) z, a, x, y, b = inputs zr, xr, yr = [set(view_roots(i)) for i in (z, x, y)] # We want the gemm to be inplace. When this op is inplace, it # declare to be inplace only on z. So to make it safe, we # raise an error if z can be a view on x or y. # I don't know if Theano currently can support that case. As # this case don't happen in our code, I won't spent time # investigating this. So the assert is for safety. I also # think there is another mechanism that would prevent this, # but I don't what to modify old code and have chance to break # something. 
if self.inplace: if zr.intersection(xr): raise InconsistencyError(Gemm.E_z_uniq, (z, x)) if zr.intersection(yr): raise InconsistencyError(Gemm.E_z_uniq, (z, y)) if z.ndim != 2: raise TypeError(Gemm.E_rank, z) if a.ndim != 0: raise TypeError(Gemm.E_scalar, a) if x.ndim != 2: raise TypeError(Gemm.E_rank, x) if y.ndim != 2: raise TypeError(Gemm.E_rank, y) if b.ndim != 0: raise TypeError(Gemm.E_scalar, b) if not (z.dtype == a.dtype == x.dtype == y.dtype == b.dtype): raise TypeError(Gemm.E_mixed, (z.dtype, a.dtype, x.dtype, y.dtype, b.dtype)) if (not z.dtype.startswith('float') and not z.dtype.startswith('complex')): raise TypeError(Gemm.E_float, (z.dtype)) output = z.type() return Apply(self, inputs, [output]) def perform(self, node, inp, out, params): z, a, x, y, b = inp zout, = out assert a.shape == () assert b.shape == () if not params.inplace: z = z.copy() # the original z will not be changed if z.shape == (): z.itemset(z * a + b * np.dot(x, y)) zout[0] = z else: if b == 0.0: if a == 1.0: z[:] = np.dot(x, y) elif a == -1.0: z[:] = -np.dot(x, y) else: z[:] = a * np.dot(x, y) elif b == 1.0: if a == 1.0: z += np.dot(x, y) elif a == -1.0: z -= np.dot(x, y) else: z += a * np.dot(x, y) else: z *= b z += a * np.dot(x, y) zout[0] = z def infer_shape(self, node, input_shapes): return [input_shapes[0]] setup_z_Nz_Sz_inplace = """ if (%(_zout)s != %(_z)s) { if (%(_zout)s) { Py_DECREF(%(_zout)s); } %(_zout)s = %(_z)s; Py_INCREF(%(_zout)s); } Nz = PyArray_DIMS(%(_z)s); Sz = PyArray_STRIDES(%(_z)s); """ setup_z_Nz_Sz_outplace = """ if ((NULL == %(_zout)s) || (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_z)s)[0]) || (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_z)s)[1]) || (PyArray_STRIDES(%(_zout)s)[0] <= 0) || (PyArray_STRIDES(%(_zout)s)[1] <= 0) || (PyArray_STRIDES(%(_zout)s)[0] MOD type_size) || (PyArray_STRIDES(%(_zout)s)[1] MOD type_size) || ((PyArray_STRIDES(%(_zout)s)[0] != type_size) && (PyArray_STRIDES(%(_zout)s)[1] != type_size))) { Py_XDECREF(%(_zout)s); npy_intp 
dims[2]; dims[0] = PyArray_DIMS(%(_z)s)[0]; dims[1] = PyArray_DIMS(%(_z)s)[1]; %(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims, PyArray_TYPE(%(_z)s)); //fprintf(stderr, "Gemm Allocating %%i %%i\\n", dims[0], dims[1]); if(!%(_zout)s) { PyErr_SetString(PyExc_MemoryError, "failed to alloc gemm_no_inplace output"); %(fail)s } } Nz = PyArray_DIMS(%(_zout)s); Sz = PyArray_STRIDES(%(_zout)s); if (PyArray_DESCR(%(_zout)s)->type_num == NPY_FLOAT) { float * zoutdata = (float*)PyArray_DATA(%(_zout)s); int zoi = Sz[0] / sizeof(float); int zoj = Sz[1] / sizeof(float); const float * zdata = (float*)PyArray_DATA(%(_z)s); int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(float); int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(float); for (int i = 0; i < Nz[0]; ++i) { for (int j = 0; j < Nz[1]; ++j) { zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j]; } } } else if (PyArray_DESCR(%(_zout)s)->type_num == NPY_DOUBLE) { double * zoutdata = (double*) PyArray_DATA(%(_zout)s); int zoi = Sz[0] / sizeof(double); int zoj = Sz[1] / sizeof(double); const double * zdata = (double*)PyArray_DATA(%(_z)s); int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(double); int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(double); for (int i = 0; i < Nz[0]; ++i) { for (int j = 0; j < Nz[1]; ++j) { zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j]; } } } else { PyErr_SetString(PyExc_AssertionError, "neither float nor double dtype"); %(fail)s } """ case_float_ab_constants = """ #define REAL float float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT) ? (REAL)(((float*)PyArray_DATA(%(_a)s))[0]) : (REAL)(((double*)PyArray_DATA(%(_a)s))[0]); float b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ? (REAL)(((float*)PyArray_DATA(%(_b)s))[0]) : (REAL)(((double*)PyArray_DATA(%(_b)s))[0]); #undef REAL """ case_double_ab_constants = """ #define REAL double double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT) ? 
        (REAL)(((float*)PyArray_DATA(%(_a)s))[0]) :
        (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
        double b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
        (REAL)(((float*)PyArray_DATA(%(_b)s))[0]) :
        (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
        #undef REAL
        """

    def c_code(self, node, name, inp, out, sub):
        # Render the templated C implementation for this apply node.
        _z, _a, _x, _y, _b = inp
        _zout, = out
        if node.inputs[0].type.dtype.startswith('complex'):
            raise utils.MethodNotDefined('%s.c_code'
                                         % self.__class__.__name__)
        full_code = self.build_gemm_call() % dict(locals(), **sub)
        return full_code

    def c_code_cache_version(self):
        gv = self.build_gemm_version()
        if gv:
            return (6,) + gv
        else:
            return gv

gemm_inplace = Gemm(inplace=True)
gemm_no_inplace = Gemm(inplace=False)
# For the user interface. Theano optimization will make them inplace
gemm = gemm_no_inplace
pprint.assign(gemm_inplace, FunctionPrinter('gemm_inplace'))
pprint.assign(gemm_no_inplace, FunctionPrinter('gemm_no_inplace'))


def res_is_a(node, op, maxclients=None):
    # True iff `node` was produced by `op` and, when maxclients is given,
    # has at most that many consumers in the graph.
    if maxclients is not None:
        retval = (len(node.clients) <= maxclients)
    else:
        retval = True

    return (node.owner and
            node.owner.op == op and
            retval)


def _as_scalar(res, dtype=None):
    """Return None or a TensorVariable whose type is in T.float_scalar_types"""
    if dtype is None:
        dtype = config.floatX
    if np.all(res.type.broadcastable):
        # Strip DimShuffles wrapping a fully-broadcastable value.
        while res.owner and isinstance(res.owner.op, T.DimShuffle):
            res = res.owner.inputs[0]
        # may still have some number of True's
        if res.type.broadcastable:
            rval = res.dimshuffle()
        else:
            rval = res
        if rval.type.dtype in theano.tensor.integer_dtypes:
            # We check that the upcast of res and dtype won't change dtype.
            # If dtype is float64, we will cast int64 to float64.
            # This is valid when res is a scalar used as input to a dot22
            # as the cast of the scalar can be done before or after the dot22
            # and this will give the same result.
            if theano.scalar.upcast(res.dtype, dtype) == dtype:
                return T.cast(rval, dtype)
            else:
                return None

        return rval


def _is_real_matrix(res):
    return (res.type.dtype in ('float16', 'float32', 'float64') and
            res.type.ndim == 2 and
            res.type.broadcastable[0] is False and
            res.type.broadcastable[1] is False)  # cope with tuple vs. list


def _is_real_vector(res):
    return (res.type.dtype in ('float16', 'float32', 'float64') and
            res.type.ndim == 1 and
            res.type.broadcastable[0] is False)


def _beta_L_plus_alpha_M(beta, L, alpha, M, recurse_flip=True):
    # Try to rewrite the expression (beta * L) + (alpha * M) as a single
    # gemm when M is (possibly a dimshuffle of) a dot22.  Returns a pair
    # (replacement_outputs, replaced_dot22) on success.
    # print 'BETA L + ALPHA M', beta, L, alpha, M, recurse_flip

    # EXPRESSION: (beta * L) + (alpha * M)

    # we've already checked the client counts, now just make the type check.
    # if res_is_a(M, _dot22, 1):
    if M.owner and M.owner.op == _dot22:
        Ml, Mr = M.owner.inputs
        rval = [gemm_no_inplace(L, alpha, Ml, Mr, beta)]
        # print 'GEMM 0', rval, beta, L, alpha, M
        return rval, M

    # it also might be the case that there is a dimshuffle between the +
    # and the dot22. local_dot_to_dot22 in particular will put in such things.
    if (M.owner and isinstance(M.owner.op, T.DimShuffle) and
            M.owner.inputs[0].owner and
            isinstance(M.owner.inputs[0].owner.op, Dot22)):
        MM = M.owner.inputs[0]
        if M.owner.op.new_order == (0,):
            # it is making a column MM into a vector
            MMl, MMr = MM.owner.inputs
            g = gemm_no_inplace(L.dimshuffle(0, 'x'), alpha, MMl, MMr, beta)
            rval = [g.dimshuffle(0)]
            return rval, MM
        if M.owner.op.new_order == (1,):
            # it is making a row MM into a vector
            MMl, MMr = MM.owner.inputs
            g = gemm_no_inplace(L.dimshuffle('x', 0), alpha, MMl, MMr, beta)
            rval = [g.dimshuffle(1)]
            return rval, MM
        if len(M.owner.op.new_order) == 0:
            # it is making a row MM into a vector
            MMl, MMr = MM.owner.inputs
            g = gemm_no_inplace(L.dimshuffle('x', 'x'), alpha, MMl, MMr, beta)
            rval = [g.dimshuffle()]
            return rval, MM

    # this is False'd out because of inadequate testing.
    # TODO see ticket #237
    if False and res_is_a(M, gemm_no_inplace, 1):
        # EXPRESSION: (beta * L) + (alpha * (gemm_no_inplace(G, a, u, v, b)))

        # EXPRESSION: (beta * L) + alpha * (b * G) + alpha * a * dot(u, v)
        G, a, u, v, b = M.owner.inputs
        # print 'GEMM', G, L

        if res_is_a(G, _dot22, 1):
            # EXPRESSION: (beta * L) +
            #            (alpha * (gemm_no_inplace(dot(x,y), a, u, v, b)))
            x, y = G.owner.inputs

            # EXPRESSION: (beta * L) + (alpha * ((b*dot(x,y) +
            #            (a * dot(u, v)))))
            # EXPRESSION: (beta * L) + (alpha*b*dot(x,y)) +
            #            (alpha * a * dot(u, v))
            rval = [gemm_no_inplace(gemm_no_inplace(L, alpha * b, x, y, beta),
                                    alpha * a, u, v, 1.0)]
            return rval
        if (G is L):
            # EXPRESSION: (beta * L) + (alpha*b*L) + (alpha * a * dot(u, v))
            rval = [gemm_no_inplace(L, alpha * a, u, v, alpha * b + beta)]
            return rval
        if (1.0 != alpha):
            # at the very least, move the alpha inside the gemm_no_inplace
            rval = [beta * L + gemm_no_inplace(G, alpha * a, u, v, alpha * b)]
            return rval

    if recurse_flip:
        # Retry once with the two terms swapped.
        return _beta_L_plus_alpha_M(alpha, M, beta, L, recurse_flip=False)
    else:
        return False, False


def _gemm_canonicalize(r, scale, rval, maxclients):
    # Tries to interpret node as a sum of scalars * (vectors or matrices)
    # Appends (scale, variable) pairs (or already-scaled variables) to rval.
    def scaled(thing):
        if scale == 1:
            return thing
        if scale == -1 and thing.type.dtype != 'bool':
            return -thing
        else:
            return scale * thing

    try:
        r.type.broadcastable
    except Exception:
        return None

    if ((r.type.ndim not in (1, 2)) or
            r.type.dtype not in ('float16', 'float32', 'float64',
                                 'complex64', 'complex128')):
        rval.append(scaled(r))
        return rval

    if maxclients and len(getattr(r, 'clients', [])) > maxclients:
        rval.append((scale, r))
        return rval

    if r.owner and r.owner.op == T.sub:
        _gemm_canonicalize(r.owner.inputs[0], scale, rval, 1)
        _gemm_canonicalize(r.owner.inputs[1], -scale, rval, 1)

    elif r.owner and r.owner.op == T.add:
        for i in r.owner.inputs:
            _gemm_canonicalize(i, scale, rval, 1)

    elif r.owner and r.owner.op == T.neg:
        _gemm_canonicalize(r.owner.inputs[0], -scale, rval, 1)

    elif r.owner and r.owner.op == T.mul:
        # Split the product into scalar, vector and matrix factors; only
        # products with at most one vector-or-matrix factor are recursed on.
        scalars = []
        vectors = []
        matrices = []
        for i in r.owner.inputs:
            if np.all(i.type.broadcastable):
                while i.owner and isinstance(i.owner.op, T.DimShuffle):
                    i = i.owner.inputs[0]
                if i.type.broadcastable:
                    scalars.append(i.dimshuffle())
                else:
                    scalars.append(i)
            elif _is_real_vector(i):
                vectors.append(i)
            elif _is_real_matrix(i):
                matrices.append(i)
            else:
                # just put the original arguments as in the base case
                rval.append((scale, r))
                return rval
        if len(matrices) == 1:
            assert len(vectors) == 0
            m = matrices[0]
            if len(scalars) == 0:
                _gemm_canonicalize(m, scale, rval, 1)
            elif len(scalars) == 1:
                _gemm_canonicalize(m, scaled(scalars[0]), rval, 1)
            else:
                _gemm_canonicalize(m, T.mul(scaled(scalars[0]),
                                            *scalars[1:]), rval, 1)
        elif len(vectors) == 1:
            assert len(matrices) == 0
            v = vectors[0]
            if len(scalars) == 0:
                _gemm_canonicalize(v, scale, rval, 1)
            elif len(scalars) == 1:
                _gemm_canonicalize(v, scaled(scalars[0]), rval, 1)
            else:
                _gemm_canonicalize(v, T.mul(scaled(scalars[0]),
                                            *scalars[1:]), rval, 1)
        else:
            # lets not open this up
            rval.append((scale, r))
    else:
        rval.append((scale, r))
    return rval


def _factor_canonicalized(lst):
    # remove duplicates from canonicalized list
    # we only delete out of the right end of the list,
    # once i has touched a list element, it is permantent
    lst = list(lst)
    # print 'FACTOR', lst
    # for t in lst:
    #    if not isinstance(t, (list, tuple)):
    #        t = (t,)
    #    for e in t:
    #        try:
    #            theano.printing.debugprint(e)
    #        except TypeError:
    #            print e, type(e)
    i = 0
    while i < len(lst) - 1:
        try:
            s_i, M_i = lst[i]
        except Exception:
            i += 1
            continue

        j = i + 1
        while j < len(lst):
            try:
                s_j, M_j = lst[j]
            except Exception:
                j += 1
                continue

            if M_i is M_j:
                # Same variable (by identity): merge by summing the scales.
                s_i = s_i + s_j
                lst[i] = (s_i, M_i)
                del lst[j]
            else:
                j += 1
        i += 1
    return lst


def _gemm_from_factored_list(lst):
    """
    Returns None, or a list to replace node.outputs.

    """
    lst2 = []
    # Remove the tuple that can't be cast correctly.
    # This can happen when we try to cast a complex to a real
    for sM in lst:
        # Make every pair in list have matching dtypes
        # sM can be a tuple of 2 elements or a theano variable.
        if isinstance(sM, tuple):
            sm0, sm1 = sM
            sm0 = T.as_tensor_variable(sm0)
            if theano.scalar.upcast(sm0.dtype, sm1.dtype) == sm1.dtype:
                lst2.append((T.cast(sm0, sm1.dtype), sM[1]))

    lst = lst2

    def item_to_var(t):
        # Turn a (scale, M) pair back into a scaled variable.
        try:
            s, M = t
        except Exception:
            return t
        if s == 1:
            return M
        if s == -1:
            return -M
        return s * M

    # Try every pair in the sM_list, trying to turn it into a gemm operation
    for i in xrange(len(lst) - 1):
        s_i, M_i = lst[i]

        for j in xrange(i + 1, len(lst)):
            s_j, M_j = lst[j]

            if M_i.type != M_j.type:
                continue

            # print 'TRYING', (s_i, M_i, s_j, M_j)

            gemm_of_sM_list, old_dot22 = _beta_L_plus_alpha_M(s_i, M_i,
                                                              s_j, M_j)
            # print 'GOT IT', gemm_of_sM_list
            if gemm_of_sM_list:
                assert len(gemm_of_sM_list) == 1
                add_inputs = [item_to_var(input)
                              for k, input in enumerate(lst)
                              if k not in (i, j)]
                add_inputs.extend(gemm_of_sM_list)
                if len(add_inputs) > 1:
                    rval = [T.add(*add_inputs)]
                else:
                    rval = add_inputs
                # print "RETURNING GEMM THING", rval
                return rval, old_dot22


def _gemm_from_node2(node):
    """

    :todo: In many expressions, there are many ways to turn it into a
        gemm.  For example dot(a,b) + c + d.  This function should
        return all of them, so that if one version of gemm causes a
        cycle in the graph, then another application of gemm can be
        tried.

    """
    lst = []
    t0 = time.time()
    _gemm_canonicalize(node.outputs[0], 1.0, lst, 0)
    t1 = time.time()

    # print "GEMM CANON", lst

    if len(lst) > 1:
        lst = _factor_canonicalized(lst)
        t2 = time.time()
        rval = _gemm_from_factored_list(lst)
        t3 = time.time()

        # It can happen that _factor_canonicalized and
        # _gemm_from_factored_list return a node with an incorrect
        # type.  This happens in particular when one of the scalar
        # factors forces the upcast of the whole expression.  In that
        # case, we simply skip that candidate for Gemm.
        # This was discussed in
        # http://groups.google.com/group/theano-dev/browse_thread/thread/a3096c82856e3ad5,
        # but never made it into a trac ticket.

        if rval and (rval[0][0].type == node.outputs[0].type):
            return rval, t1 - t0, t2 - t1, t3 - t2

    return None, t1 - t0, 0, 0


class GemmOptimizer(Optimizer):
    """Graph optimizer for inserting Gemm operations."""

    def __init__(self):
        Optimizer.__init__(self)
        self.warned = False

    def add_requirements(self, fgraph):
        fgraph.attach_feature(toolbox.ReplaceValidate())

    def apply(self, fgraph):
        # Repeatedly sweep the graph, replacing add/sub/neg/mul subgraphs
        # with Gemm nodes, until a full pass makes no replacement.
        # Returns a profiling tuple consumed by print_profile below.
        did_something = True
        nb_iter = 0
        nb_replacement = 0
        nb_replacement_didn_t_remove = 0
        nb_inconsistency_make = 0
        nb_inconsistency_replace = 0
        time_canonicalize = 0
        time_factor_can = 0
        time_factor_list = 0
        time_toposort = 0
        if fgraph.profile:
            validate_before = fgraph.profile.validate_time
            callbacks_before = fgraph.execute_callbacks_times.copy()
            callback_before = fgraph.execute_callbacks_time

        def on_import(new_node):
            # Newly-imported nodes get appended so the current sweep also
            # considers them.
            if new_node is not node:
                nodelist.append(new_node)

        u = theano.gof.opt.Updater(on_import, None, None,
                                   name="GemmOptimizer")
        fgraph.attach_feature(u)
        while did_something:
            nb_iter += 1
            t0 = time.time()
            nodelist = theano.gof.graph.io_toposort(fgraph.inputs,
                                                    fgraph.outputs)
            time_toposort += time.time() - t0
            did_something = False
            nodelist.reverse()
            for node in nodelist:
                if not (isinstance(node.op, T.Elemwise) and
                        isinstance(node.op.scalar_op,
                                   (theano.scalar.Add, theano.scalar.Sub,
                                    theano.scalar.Neg, theano.scalar.Mul))):
                    continue
                if node not in fgraph.apply_nodes:
                    # This mean that we already removed this node from
                    # the graph
                    continue
                try:
                    new_outputs, time1, time2, time3 = _gemm_from_node2(node)
                    time_canonicalize += time1
                    time_factor_can += time2
                    time_factor_list += time3
                except InconsistencyError:
                    nb_inconsistency_make += 1
                    continue
                if new_outputs:
                    new_outputs, old_dot22 = new_outputs
                    assert len(new_outputs) == len(node.outputs)
                    new_outputs[0].tag.values_eq_approx = values_eq_approx_remove_inf_nan
                    try:
                        fgraph.replace_all_validate_remove(
                            list(zip(node.outputs, new_outputs)),
                            [old_dot22],
                            reason='GemmOptimizer',
                            # For now we disable the warning as we know case
                            # that we need to fix.
                            warn=False,  # warn=not self.warned
                        )
                        did_something = True
                        nb_replacement += 1
                    except InconsistencyError:
                        # TODO: retry other applications of gemm (see comment
                        # in _gemm_from_node)
                        nb_inconsistency_replace += 1
                    except ReplacementDidntRemovedError:
                        nb_replacement_didn_t_remove += 1
                        self.warned = True
        fgraph.remove_feature(u)
        if fgraph.profile:
            validate_time = fgraph.profile.validate_time - validate_before
            callback_time = fgraph.execute_callbacks_time - callback_before
            callbacks_time = {}
            for k, v in iteritems(fgraph.execute_callbacks_times):
                if k in callbacks_before:
                    callbacks_time[k] = v - callbacks_before[k]
                else:
                    callbacks_time[k] = v
        else:
            validate_time = None
            callback_time = None
            callbacks_time = {}
        return (self, nb_iter, nb_replacement, nb_replacement_didn_t_remove,
                nb_inconsistency_make, nb_inconsistency_replace,
                time_canonicalize, time_factor_can,
                time_factor_list, time_toposort,
                validate_time, callback_time, callbacks_time,)

    @staticmethod
    def print_profile(stream, prof, level=0):
        # prof is the tuple returned by apply() above.
        blanc = ('    ' * level)
        print(blanc, "GemmOptimizer", file=stream)
        print(blanc, " nb_iter", prof[1], file=stream)
        print(blanc, " nb_replacement", prof[2], file=stream)
        print(blanc, " nb_replacement_didn_t_remove", prof[3], file=stream)
        print(blanc, " nb_inconsistency_make", prof[4], file=stream)
        print(blanc, " nb_inconsistency_replace", prof[5], file=stream)
        print(blanc, " time_canonicalize", prof[6], file=stream)
        print(blanc, " time_factor_can", prof[7], file=stream)
        print(blanc, " time_factor_list", prof[8], file=stream)
        print(blanc, " time_toposort", prof[9], file=stream)
        print(blanc, " validate_time", prof[10], file=stream)
        print(blanc, " callback_time", prof[11], file=stream)
        if prof[11] > 1:
            print(blanc, " callbacks_time", file=stream)
            for i in sorted(iteritems(prof[12]), key=lambda a: a[1]):
                if i[1] > 0:
                    print(i)


class Dot22(GemmRelated):
    """Compute a matrix-matrix product.

    This is a specialization of the more general Dot().

    """
    check_input = False

    def make_node(self, x, y):
        x = T.as_tensor_variable(x)
        y = T.as_tensor_variable(y)

        dtypes = ('float16', 'float32', 'float64', 'complex64', 'complex128')
        if x.type.ndim != 2 or x.type.dtype not in dtypes:
            raise TypeError(x)
        if y.type.ndim != 2 or y.type.dtype not in dtypes:
            raise TypeError(y)
        if y.type.dtype != x.type.dtype:
            raise TypeError('dtype mismatch to Dot22')
        bz = (x.type.broadcastable[0], y.type.broadcastable[1])
        outputs = [T.tensor(x.type.dtype, bz)]
        return Apply(self, [x, y], outputs)

    def perform(self, node, inp, out):
        x, y = inp
        z, = out
        try:
            z[0] = np.asarray(np.dot(x, y))
        except ValueError as e:
            # The error raised by numpy has no shape information, we mean to
            # add that
            e.args = e.args + (x.shape, y.shape)
            raise

    def infer_shape(self, node, input_shapes):
        return [[input_shapes[0][0], input_shapes[1][1]]]

    setup_z_Nz_Sz = """
        if ((NULL == %(_zout)s)
            || (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_x)s)[0])
            || (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_y)s)[1]))
        {
            if (NULL != %(_zout)s) Py_XDECREF(%(_zout)s);
            npy_intp dims[2];
            dims[0] = PyArray_DIMS(%(_x)s)[0];
            dims[1] = PyArray_DIMS(%(_y)s)[1];
            %(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
                                                          PyArray_TYPE(%(_x)s));
            //fprintf(stderr, "Dot Allocating %%i %%i\\n", dims[0], dims[1]);
            if(!%(_zout)s) {
                PyErr_SetString(PyExc_MemoryError,
                                "failed to alloc dot22 output");
                %(fail)s
            }
        }
        Nz = PyArray_DIMS(%(_zout)s);
        Sz = PyArray_STRIDES(%(_zout)s);

        """
    check_ab_double_or_float = ""
    case_float_ab_constants = """
                float a = 1.0;
                float b = 0.0;
        """
    case_double_ab_constants = """
                double a = 1.0;
                double b = 0.0;
        """

    def c_code(self, node, name, inp, out, sub):  # DEBUG
        _x, _y = inp
        _zout, = out
        if node.inputs[0].type.dtype.startswith('complex'):
            raise utils.MethodNotDefined('%s.c_code'
                                         % self.__class__.__name__)
        if len(self.c_libraries()) <= 0:
            return super(Dot22, self).c_code(node,
name, (_x, _y), (_zout, ), sub) full_code = self.build_gemm_call() % dict(locals(), **sub) return full_code def c_code_cache_version(self): gv = self.build_gemm_version() if gv: return (2,) + gv else: return gv _dot22 = Dot22() @local_optimizer([T.Dot]) def local_dot_to_dot22(node): # This works for tensor.outer too because basic.outer is a macro that # produces a dot(dimshuffle,dimshuffle) of form 4 below if not isinstance(node.op, T.Dot): return x, y = node.inputs if y.type.dtype != x.type.dtype: # TODO: upcast one so the types match _logger.info('Not optimizing dot with inputs %s %s %s %s', x, y, x.type, y.type) return if y.type.dtype in ['float16', 'float32', 'float64', 'complex64', 'complex128']: with inherit_stack_trace(node.outputs): if x.ndim == 2 and y.ndim == 2: return [_dot22(*node.inputs)] if x.ndim == 2 and y.ndim == 1: return [_dot22(x, y.dimshuffle(0, 'x')).dimshuffle(0)] if x.ndim == 1 and y.ndim == 2: return [_dot22(x.dimshuffle('x', 0), y).dimshuffle(1)] if x.ndim == 1 and y.ndim == 1: return [_dot22(x.dimshuffle('x', 0), y.dimshuffle(0, 'x')).dimshuffle()] _logger.info('Not optimizing dot with inputs %s %s %s %s', x, y, x.type, y.type) @local_optimizer([gemm_no_inplace], inplace=True) def local_inplace_gemm(node): if node.op == gemm_no_inplace: with inherit_stack_trace(node.outputs): return [gemm_inplace(*node.inputs)] @local_optimizer([gemv_no_inplace], inplace=True) def local_inplace_gemv(node): if node.op == gemv_no_inplace: with inherit_stack_trace(node.outputs): return [gemv_inplace(*node.inputs)] @local_optimizer([ger], inplace=True) def local_inplace_ger(node): if node.op == ger: with inherit_stack_trace(node.outputs): return [ger_destructive(*node.inputs)] @local_optimizer([gemm_no_inplace]) def local_gemm_to_gemv(node): """GEMM acting on row or column matrices -> GEMV.""" if node.op == gemm_no_inplace: z, a, x, y, b = node.inputs with inherit_stack_trace(node.outputs): if z.broadcastable == x.broadcastable == (True, False): r = 
gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b) return [r.dimshuffle('x', 0)] if z.broadcastable == y.broadcastable == (False, True): r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b) return [r.dimshuffle(0, 'x')] @local_optimizer([gemm_no_inplace]) def local_gemm_to_ger(node): """GEMM computing an outer-product -> GER.""" if node.op == gemm_no_inplace: z, a, x, y, b = node.inputs if x.broadcastable[1] and y.broadcastable[0]: with inherit_stack_trace(node.outputs): # x and y are both vectors so this might qualifies for a GER xv = x.dimshuffle(0) yv = y.dimshuffle(1) try: bval = T.get_scalar_constant_value(b) except T.NotScalarConstantError: # b isn't a constant, GEMM is doing useful pre-scaling return if bval == 1: # best case a natural GER rval = ger(z, a, xv, yv) return [rval] elif bval == 0: # GER on zeros_like should be faster than GEMM zeros = T.zeros([x.shape[0], y.shape[1]], x.dtype) rval = ger(zeros, a, xv, yv) return [rval] else: # if bval is another constant, then z is being usefully # pre-scaled and GER isn't really the right tool for the job. 
return # TODO: delete this optimization when we have the proper dot->gemm->ger pipeline # working @local_optimizer([_dot22]) def local_dot22_to_ger_or_gemv(node): """dot22 computing an outer-product -> GER.""" if node.op == _dot22: with inherit_stack_trace(node.outputs): x, y = node.inputs xb = x.broadcastable yb = y.broadcastable one = T.as_tensor_variable(np.asarray(1, dtype=x.dtype)) zero = T.as_tensor_variable(np.asarray(0, dtype=x.dtype)) if xb[1] and yb[0]: # x and y are both vectors so this might qualifies for a GER xv = x.dimshuffle(0) yv = y.dimshuffle(1) zeros = T.zeros([x.shape[0], y.shape[1]], dtype=x.dtype) rval = ger(zeros, one, xv, yv) return [rval] if xb[0] and yb[1]: # x and y are both vectors so this qualifies for a sdot / ddot # TODO: Theano doesn't have a sdot, but gemv is better than _dot22 xv = x.dimshuffle(1) zeros = T.AllocEmpty(x.dtype)(1) rval = gemv_no_inplace(zeros, one, y.T, xv, zero) return [rval.dimshuffle('x', 0)] if xb[0] and not yb[0] and not yb[1]: # x is vector, y is matrix so try gemv xv = x.dimshuffle(1) zeros = T.AllocEmpty(x.dtype)(y.shape[1]) rval = gemv_no_inplace(zeros, one, y.T, xv, zero) return [rval.dimshuffle('x', 0)] if not xb[0] and not xb[1] and yb[1]: # x is matrix, y is vector, try gemv yv = y.dimshuffle(0) zeros = T.AllocEmpty(x.dtype)(x.shape[0]) rval = gemv_no_inplace(zeros, one, x, yv, zero) return [rval.dimshuffle(0, 'x')] ################################# # # Set up the BlasOpt optimizer # ################################# blas_optdb = SequenceDB() # run after numerical stability optimizations (1.5) optdb.register('BlasOpt', blas_optdb, 1.7, 'fast_run', 'fast_compile') # run before specialize (2.0) because specialize is basically a # free-for-all that makes the graph crazy. # fast_compile is needed to have GpuDot22 created. 
blas_optdb.register('local_dot_to_dot22', in2out(local_dot_to_dot22), 0, 'fast_run', 'fast_compile') blas_optdb.register('gemm_optimizer', GemmOptimizer(), 10, 'fast_run') blas_optdb.register('local_gemm_to_gemv', EquilibriumOptimizer([local_gemm_to_gemv, local_gemm_to_ger, local_dot22_to_ger_or_gemv, local_dimshuffle_lift], max_use_ratio=5, ignore_newtrees=False), 15, 'fast_run') # After destroyhandler(49.5) but before we try to make elemwise things # inplace (75) blas_opt_inplace = in2out(local_inplace_gemm, local_inplace_gemv, local_inplace_ger, name="blas_opt_inplace") optdb.register('InplaceBlasOpt', blas_opt_inplace, 70.0, 'fast_run', 'inplace', 'blas_opt_inplace') class Dot22Scalar(GemmRelated): """Compute a matrix-matrix product. This is a specialization of the more general Dot() Used to call optimized gemm implementation. Also used to generate a gemm later. compute scalar*dot(x,y). """ check_input = False def make_node(self, x, y, a): if a.ndim != 0: raise TypeError(Gemm.E_scalar, a) if x.ndim != 2: raise TypeError(Gemm.E_rank, x) if y.ndim != 2: raise TypeError(Gemm.E_rank, y) if not (a.dtype == x.dtype == y.dtype): raise TypeError('Dot22Scalar requires matching dtypes', (a.dtype, x.dtype, y.dtype)) if (not a.dtype.startswith('float') and not a.dtype.startswith('complex')): raise TypeError('Dot22Scalar requires float or complex args', a.dtype) bz = [x.type.broadcastable[0], y.type.broadcastable[1]] outputs = [T.tensor(x.type.dtype, bz)] return Apply(self, [x, y, a], outputs) def perform(self, node, inp, out): x, y, scalar = inp z, = out try: z[0] = np.asarray(scalar * np.dot(x, y)) except ValueError as e: # The error raised by numpy has no shape information, we # mean to add that e.args = e.args + (x.shape, y.shape) raise def infer_shape(self, node, input_shapes): return [[input_shapes[0][0], input_shapes[1][1]]] setup_z_Nz_Sz = Dot22.setup_z_Nz_Sz check_ab_double_or_float = """ if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE) && 
(PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT)) {PyErr_SetString(PyExc_NotImplementedError, "type(a) is not double or float"); %(fail)s;} """ case_float_ab_constants = """ #define REAL float float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT) ? (REAL)(((float*)PyArray_DATA(%(_a)s))[0]) : (REAL)(((double*)PyArray_DATA(%(_a)s))[0]); #undef REAL float b = 0.0; """ case_double_ab_constants = """ #define REAL double double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT) ? (REAL)(((float*)PyArray_DATA(%(_a)s))[0]) : (REAL)(((double*)PyArray_DATA(%(_a)s))[0]); #undef REAL double b = 0.0; """ def c_code(self, node, name, inp, out, sub): _x, _y, _a = inp _zout, = out if node.inputs[0].type.dtype.startswith('complex'): raise utils.MethodNotDefined('%s.c_code' % self.__class__.__name__) if len(self.c_libraries()) <= 0: return super(Dot22Scalar, self).c_code(node, name, (_x, _y), (_zout, ), sub) full_code = self.build_gemm_call() % dict(locals(), **sub) return full_code def c_code_cache_version(self): gv = self.build_gemm_version() if gv: return (2,) + gv else: return gv _dot22scalar = Dot22Scalar() @local_optimizer([T.mul]) def local_dot22_to_dot22scalar(node): """ Notes ----- Previous attempts to alter this optimization to replace dot22 with gemm instead of dot22scalar resulted in some Scan nodes being duplicated and the ScanSaveMem optimization never running on them, resulting in highly increased memory usage. Until this issue is resolved, this optimization should keep using dot22scalar instead of gemm. We upcast the scalar if after the multiplication with the dot this give the same type. We execute this optimizer after the gemm optimizer. This allow to give more priority to gemm that give more speed up then this optimizer, but allow the gemm optimizer to ignore this op. 
TODO: support when we can reorder the mul to generate a dot22scalar or fix the canonizer to merge them(1 mul with multiple inputs) """ if node.op != T.mul: return False i_dot22 = [x.owner and x.owner.op == _dot22 for x in node.inputs] if not any(i_dot22): return False # no dot22 if i_dot22.count(True) > 1: # TODO: try each of them. pass # return False #TODO fix dot22_idx = i_dot22.index(True) d = node.inputs[dot22_idx] i_scalar = [_as_scalar(x, dtype=d.dtype) for x in node.inputs] if not any(i_scalar): # Check if we can reorder the graph as this mul have a mul in inputs. # We support only 1 additional level of mul. # The canonizer should have merged those mul together. i_mul = [x.owner and x.owner.op == T.mul and any([_as_scalar(x_i, dtype=d.dtype) for x_i in x.owner.inputs]) for x in node.inputs] if not any(i_mul): # no scalar in input and no multiplication # if their was a multiplication we couls reorder the graph # by the associativity of the graph. return False mul_idx = i_mul.index(True) # The first one should always work m = node.inputs[mul_idx] scalar_idx = -1 for i, x in enumerate(m.owner.inputs): if _as_scalar(x, dtype=d.dtype) and (theano.scalar.upcast( x.type.dtype, d.type.dtype) == d.type.dtype): scalar_idx = i break if scalar_idx < 0: _logger.info('Not optimizing dot22 with inputs %s %s, as the' ' type of the scalar cannot be upcasted to the' ' matrix type', node.inputs, [x.type for x in node.inputs]) return False a = T.cast(_as_scalar(m.owner.inputs[scalar_idx], dtype=d.dtype), d.type.dtype) assert not a.type.ndim dot = _dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a) # The other inputs to the original node that were # neither part of the dot22 or this mul should be # factors in the returned "mul" node. 
assert dot22_idx != mul_idx other_factors = [inpt for i, inpt in enumerate(node.inputs) if i not in (dot22_idx, mul_idx)] other_m_inputs = [inpt for i, inpt in enumerate(m.owner.inputs) if i != scalar_idx] return [T.mul(dot, *(other_factors + other_m_inputs))] scalar_idx = -1 for i, x in enumerate(node.inputs): if (i != dot22_idx and i_scalar[i] is not None and (theano.scalar.upcast(x.type.dtype, d.type.dtype) == d.type.dtype)): scalar_idx = i break if scalar_idx < 0: _logger.info('Not optimizing dot22 with inputs %s %s, as the type ' 'of the scalar cannot be upcasted to the matrix type', node.inputs, [x.type for x in node.inputs]) return False assert scalar_idx < len(node.inputs) s = node.inputs[scalar_idx] o = copy.copy(node.inputs) o.remove(d) o.remove(s) a = T.cast(i_scalar[scalar_idx], d.type.dtype) assert not a.type.ndim if len(o) == 0: return [_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)] else: return [T.mul(_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a), *o)] # must happen after gemm as the gemm optimizer don't understant # dot22scalar and gemm give more speed up then dot22scalar blas_optdb.register('local_dot22_to_dot22scalar', in2out(local_dot22_to_dot22scalar), 11, 'fast_run') class BatchedDot(Op): """ Computes the batched dot product of two variables: batched_dot(a, b)[i] = dot(a[i], b[i]) """ __props__ = () def make_node(self, *inputs): inputs = list(map(T.as_tensor_variable, inputs)) if len(inputs) != 2: raise TypeError("theano.tensor.blas.BatchedDot: 2 arguments" " required, %d given " % len(inputs)) if inputs[0].ndim not in (2, 3): raise TypeError("theano.tensor.blas.BatchedDot: input 0 (0-indexed)" " must have ndim of 2 or 3, %d given. Consider" " calling theano.tensor.batched_dot instead." % inputs[0].ndim) if inputs[1].ndim not in (2, 3): raise TypeError("theano.tensor.blas.BatchedDot: input 1 (0-indexed)" " must have ndim of 2 or 3, %d given. Consider" " calling theano.tensor.batched_dot instead." 
% inputs[1].ndim) dtype = theano.scalar.upcast(*[input.type.dtype for input in inputs]) # upcast inputs to common dtype if needed upcasted_inputs = [T.cast(input, dtype) for input in inputs] broadcastable = ((inputs[0].type.broadcastable[0] or inputs[1].type.broadcastable[0],) + inputs[0].type.broadcastable[1:-1] + inputs[1].type.broadcastable[2:]) return Apply(self, upcasted_inputs, [T.tensor(dtype, broadcastable)]) def perform(self, node, inp, out): x, y = inp z, = out if x.shape[0] != y.shape[0]: raise TypeError( "theano.tensor.blas.BatchedDot: inputs [%s] must have the" " same size in axis 0, but have sizes [%s]." % (", ".join(map(str, inp)), ", ".join([str(i.shape[0]) for i in inp]))) shape = self.infer_shape(node, [i.shape for i in inp])[0] dtype = node.outputs[0].dtype z0 = z[0] = np.empty(shape, dtype=dtype) for i in xrange(z0.shape[0]): z0[i] = np.dot(x[i], y[i]) def c_support_code(self): batch_gemm_defn = """ template<typename dtype> bool batch_gemm(void (*gemm)(char*, char*, const int*, const int*, const int*, const dtype*, const dtype*, const int*, const dtype*, const int*, const dtype*, dtype*, const int*), int type_size, PyArrayObject* xs, PyArrayObject* ys, PyArrayObject* zs) { npy_intp *Nx = PyArray_DIMS(xs), *Sx = PyArray_STRIDES(xs); npy_intp *Ny = PyArray_DIMS(ys), *Sy = PyArray_STRIDES(ys); npy_intp *Nz = PyArray_DIMS(zs), *Sz = PyArray_STRIDES(zs); if (Nx[0] != Ny[0]) { PyErr_Format(PyExc_ValueError, "Shape mismatch: batch sizes unequal." " x.shape is (%d, %d, %d)," " y.shape is (%d, %d, %d).", Nx[0], Nx[1], Nx[2], Ny[0], Ny[1], Ny[2]); return 1; } if (Nx[2] != Ny[1]) { PyErr_Format(PyExc_ValueError, "Shape mismatch: summation axis sizes unequal." " x.shape is (%d, %d, %d)," " y.shape is (%d, %d, %d).", Nx[0], Nx[1], Nx[2], Ny[0], Ny[1], Ny[2]); return 1; } /* encode the stride structure of _x,_y,_z into a single integer. */ int unit = 0; unit |= ((Sx[2] == type_size || Nx[2] == 1) ? 0x0 : (Sx[1] == type_size || Nx[1]==1) ? 
0x1 : 0x2) << 8; unit |= ((Sy[2] == type_size || Ny[2] == 1) ? 0x0 : (Sy[1] == type_size || Ny[1]==1) ? 0x1 : 0x2) << 4; unit |= ((Sz[2] == type_size || Nz[2] == 1) ? 0x0 : (Sz[1] == type_size || Nz[1]==1) ? 0x1 : 0x2) << 0; /* create appropriate strides for malformed matrices that are row or column * vectors, or empty matrices. * In that case, the value of the stride does not really matter, but * some versions of BLAS insist that: * - they are not smaller than the number of elements in the array, * - they are not 0. */ int sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[2] + 1); int sx_2 = (Nx[2] > 1) ? Sx[2]/type_size : (Nx[1] + 1); int sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[2] + 1); int sy_2 = (Ny[2] > 1) ? Sy[2]/type_size : (Ny[1] + 1); int sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[2] + 1); int sz_2 = (Nz[2] > 1) ? Sz[2]/type_size : (Nz[1] + 1); dtype* x = (dtype*)PyArray_DATA(xs); dtype* y = (dtype*)PyArray_DATA(ys); dtype* z = (dtype*)PyArray_DATA(zs); dtype a = 1.0; dtype b = 0.0; char N = 'N'; char T = 'T'; int Nz1 = Nz[1], Nz2 = Nz[2], Nx2 = Nx[2]; // loop over batch axis for (int i = 0; i < Nz[0]; i++) { switch(unit) { case 0x000: gemm(&N, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_1, &b, z, &sz_1); break; case 0x100: gemm(&N, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_2, &b, z, &sz_1); break; case 0x010: gemm(&T, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_1, &b, z, &sz_1); break; case 0x110: gemm(&T, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_2, &b, z, &sz_1); break; case 0x001: gemm(&T, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_1, &b, z, &sz_2); break; case 0x101: gemm(&N, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_1, &b, z, &sz_2); break; case 0x011: gemm(&T, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_2, &b, z, &sz_2); break; case 0x111: gemm(&N, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_2, &b, z, &sz_2); break; default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); return 1; }; x += Sx[0] / type_size; y += Sy[0] / type_size; z 
+= Sz[0] / type_size; } return 0; } """ return blas_header_text() + batch_gemm_defn def c_libraries(self): return ldflags() def c_compile_args(self): return ldflags(libs=False, flags=True) def c_lib_dirs(self): return ldflags(libs=False, libs_dir=True) def c_header_dirs(self): return ldflags(libs=False, include_dir=True) def c_code_cleanup(self, node, name, inputs, outputs, sub): return """ // clean up views Py_XDECREF(xs); xs = 0; Py_XDECREF(ys); ys = 0; Py_XDECREF(zs); zs = 0; """ def c_code(self, node, name, inp, out, sub): _x, _y = inp _z, = out fail = sub["fail"] # generate contiguity condition def contiguous(var, ndim): strides = "PyArray_STRIDES(%s)" % var if ndim == 1: return "{strides}[0] == type_size".format(strides=strides) return " && ".join([ " && ".join("{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0" .format(strides=strides, i=i) for i in range(1, ndim)), "(%s)" % " || ".join("{strides}[{i}] == type_size" .format(strides=strides, i=i) for i in range(1, ndim)), ]) x_ndim, y_ndim, z_ndim = node.inputs[0].ndim, node.inputs[1].ndim, node.outputs[0].ndim # generate code to allocate output based on runtime input shapes z_dims = ["PyArray_DIMS(%s)[0]" % _x] if x_ndim == 3: z_dims.append("PyArray_DIMS(%s)[1]" % _x) if y_ndim == 3: z_dims.append("PyArray_DIMS(%s)[2]" % _y) assert len(z_dims) == z_ndim z_shape_correct = " && ".join("PyArray_DIMS(%s)[%i] == %s" % (_z, i, dim) for i, dim in enumerate(z_dims)) z_shape = ", ".join(z_dims) z_contiguous = contiguous(_z, z_ndim) allocate = """ if (NULL == %(_z)s || !(%(z_shape_correct)s) || !(%(z_contiguous)s)) { npy_intp dims[%(z_ndim)s] = {%(z_shape)s}; Py_XDECREF(%(_z)s); %(_z)s = (PyArrayObject*)PyArray_SimpleNew( %(z_ndim)s, dims, PyArray_TYPE(%(_x)s)); if(!%(_z)s) { PyErr_SetString(PyExc_MemoryError, "failed to alloc BatchedDot output"); %(fail)s } } """ % locals() # code to reallocate inputs contiguously if necessary contiguate = [] for var, ndim in [(_x, x_ndim), (_y, y_ndim)]: _contiguous = 
contiguous(var, ndim) contiguate.append(""" if (!(%(_contiguous)s)) { PyArrayObject * _copy = (PyArrayObject *) PyArray_Copy(%(var)s); if (!_copy) %(fail)s Py_XDECREF(%(var)s); %(var)s = _copy; } """ % locals()) contiguate = "\n".join(contiguate) def c_dimshuffle(newname, oldname, shape): _fail = fail _shape = ", ".join("1" if axis is None else "PyArray_DIMS(%s)[%i]" % (oldname, axis) for axis in shape) return """{ npy_intp dims[3] = {%(_shape)s}; PyArray_Dims newshape = {dims, 3}; %(newname)s = (PyArrayObject*)PyArray_Newshape(%(oldname)s, &newshape, NPY_ANYORDER); if (!%(newname)s) %(_fail)s // make sure we didn't accidentally copy assert(PyArray_DATA(%(oldname)s) == PyArray_DATA(%(newname)s)); }""" % locals() # create tensor3 views for any of x, y, z that are not tensor3, so that # we only need to implement the tensor3-tensor3 batched dot product. # xs, ys and zs will point to these views, or to the original array if # it was already tensor3. # in the latter case, we artificially increase the reference count of # the original array so that the c_code_cleanup method can decref them # all indiscriminately. upcast = [] if x_ndim == 3: upcast.append("xs = %(_x)s; Py_XINCREF(xs);") elif x_ndim == 2: upcast.append(c_dimshuffle("xs", _x, (0, None, 1))) if y_ndim == 3: upcast.append("ys = %(_y)s; Py_XINCREF(ys);") elif y_ndim == 2: upcast.append(c_dimshuffle("ys", _y, (0, 1, None))) if z_ndim == 3: upcast.append("zs = %(_z)s; Py_XINCREF(zs);") else: upcast.append(c_dimshuffle( "zs", _z, (0, None if x_ndim == 2 else 1, None if y_ndim == 2 else 1))) upcast = "\n".join(upcast) % locals() return """ int type_num = PyArray_DESCR(%(_x)s)->type_num; int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes // xs, ys, zs will point to views onto %(_x)s, %(_y)s, %(_z)s PyArrayObject *xs = 0, *ys = 0, *zs = 0; if (PyArray_NDIM(%(_x)s) != %(x_ndim)s) { PyErr_Format(PyExc_NotImplementedError, "rank(x) != %(x_ndim)s. 
rank(x) is %%d.", PyArray_NDIM(%(_x)s)); %(fail)s; } if (PyArray_NDIM(%(_y)s) != %(y_ndim)s) { PyErr_Format(PyExc_NotImplementedError, "rank(y) != %(y_ndim)s. rank(y) is %%d.", PyArray_NDIM(%(_y)s)); %(fail)s; } if (%(_z)s && PyArray_NDIM(%(_z)s) != %(z_ndim)s) { PyErr_Format(PyExc_NotImplementedError, "rank(z) != %(z_ndim)s. rank(z) is %%d.", PyArray_NDIM(%(_z)s)); %(fail)s; } // allocate output %(allocate)s // reallocate any noncontiguous arrays or arrays with invalid strides %(contiguate)s // add dims to make sure everything is tensor3 %(upcast)s // from here on, use xs, ys and zs as they are tensor3 and share memory // with the original %(_x)s, %(_y)s and %(_z)s arrays. if ((PyArray_DESCR(xs)->type_num != NPY_DOUBLE) && (PyArray_DESCR(xs)->type_num != NPY_FLOAT)) {PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;} if ((PyArray_DESCR(ys)->type_num != NPY_DOUBLE) && (PyArray_DESCR(ys)->type_num != NPY_FLOAT)) {PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;} if ((PyArray_DESCR(zs)->type_num != NPY_DOUBLE) && (PyArray_DESCR(zs)->type_num != NPY_FLOAT)) {PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;} if ((PyArray_DESCR(xs)->type_num != PyArray_DESCR(ys)->type_num) ||(PyArray_DESCR(xs)->type_num != PyArray_DESCR(zs)->type_num)) { PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; } switch (type_num) { case NPY_FLOAT: if (batch_gemm<float>(sgemm_, type_size, xs, ys, zs)) { %(fail)s; } break; case NPY_DOUBLE: if (batch_gemm<double>(dgemm_, type_size, xs, ys, zs)) { %(fail)s; } break; } """ % locals() def c_code_cache_version(self): from theano.tensor.blas_headers import blas_header_version return (4, blas_header_version()) def grad(self, inp, grads): x, y = inp gz, = grads xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim # grad is a vector, so x is a matrix and y is a matrix if gdim == 1: 
xgrad = gz.dimshuffle(0, 'x') * y ygrad = gz.dimshuffle(0, 'x') * x # x is a matrix, y is a tensor3, grad is a matrix elif xdim == 2 and ydim == 3: xgrad = T.batched_dot(gz, y.dimshuffle(0, 2, 1)) ygrad = x.dimshuffle(0, 1, 'x') * gz.dimshuffle(0, 'x', 1) # x is a tensor3, y is a matrix, grad is a matrix elif xdim == 3 and ydim == 2: xgrad = gz.dimshuffle(0, 1, 'x') * y.dimshuffle(0, 'x', 1) ygrad = T.batched_dot(x.dimshuffle(0, 2, 1), gz) # x is a tensor3, y is a tensor3, grad is a tensor3 elif xdim == ydim == 3: xgrad = T.batched_dot(gz, y.dimshuffle(0, 2, 1)) ygrad = T.batched_dot(x.dimshuffle(0, 2, 1), gz) # If x or y contain broadcastable dimensions but only one of # them know that a matching dimensions is broadcastable, the # above code don't always return the right broadcast pattern. # This cause problem down the road. See gh-1461. if xgrad.broadcastable != x.broadcastable: xgrad = T.patternbroadcast(xgrad, x.broadcastable) if ygrad.broadcastable != y.broadcastable: ygrad = T.patternbroadcast(ygrad, y.broadcastable) return xgrad, ygrad def R_op(self, inputs, eval_points): # R_op for batched_dot(a, b) evaluted at c for a and d for b is # simply batched_dot(c, b) + batched_dot(a, d) assert len(inputs) == 2 assert len(eval_points) == 2 if eval_points[0] is None and eval_points[1] is None: return [None] debugger_available = config.compute_test_value != 'off' if debugger_available: try: iv0 = theano.gof.op.get_test_value(inputs[0]) except AttributeError: theano.gof.op.missing_test_message( 'first input passed to BatchedDot.R_op has no test value') debugger_available = False try: iv1 = theano.gof.op.get_test_value(inputs[1]) except AttributeError: theano.gof.op.missing_test_message( 'second input passed to BatchedDot.R_op has no test value') debugger_available = False if eval_points[0]: try: ev0 = theano.gof.op.get_test_value(eval_points[0]) except AttributeError: theano.gof.op.missing_test_message( 'first eval point passed to BatchedDot.R_op ' 'has no test 
value') debugger_available = False if eval_points[1]: try: ev1 = theano.gof.op.get_test_value(eval_points[1]) except AttributeError: theano.gof.op.missing_test_message( 'second eval point passed to BatchedDot.R_op ' 'has no test value') debugger_available = False if debugger_available: input_values = [iv0, iv1] eval_point_values = [ev0, ev1] for i in xrange(2): if eval_point_values[i] is not None and \ input_values[i].shape != eval_point_values[i].shape: raise ValueError( 'input ' + str(i) + ' and eval_point ' + str(i) + ' to BatchedDot.R_op should have the same shape, but ' 'their shapes are %s and %s, respectively' % ( str(input_values[i].shape), str(eval_point_values[i].shape))) if eval_points[0]: t1 = self(eval_points[0], inputs[1]) if eval_points[1]: t2 = self(inputs[0], eval_points[1]) if eval_points[0] and eval_points[1]: return [t1 + t2] elif eval_points[0]: return [t1] else: return [t2] def infer_shape(self, node, shapes): for shape_ in shapes: if len(shape_) not in (2, 3): raise NotImplementedError() xshp, yshp = shapes return [xshp[:-1] + yshp[2:]] batched_dot = BatchedDot() # from opt import register_specialize, register_canonicalize # @register_specialize @local_optimizer([T.sub, T.add]) def local_print_as_we_go_along(node): if node.op in (T.sub, T.add): debugprint(node)
37.962434
187
0.530859
ace7fb45eade6f5b161425e8ab8ef37376064d75
1,931
py
Python
f5/bigip/tm/asm/test/unit/test_policy_template.py
nghia-tran/f5-common-python
acb23a6e5830a119b460c19a578654113419f5c3
[ "Apache-2.0" ]
272
2016-02-23T06:05:44.000Z
2022-02-20T02:09:32.000Z
f5/bigip/tm/asm/test/unit/test_policy_template.py
nghia-tran/f5-common-python
acb23a6e5830a119b460c19a578654113419f5c3
[ "Apache-2.0" ]
1,103
2016-02-11T17:48:03.000Z
2022-02-15T17:13:37.000Z
f5/bigip/tm/asm/test/unit/test_policy_template.py
nghia-tran/f5-common-python
acb23a6e5830a119b460c19a578654113419f5c3
[ "Apache-2.0" ]
167
2016-02-11T17:48:21.000Z
2022-01-17T20:13:05.000Z
# Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock import pytest from f5.bigip import ManagementRoot from f5.bigip.tm.asm.policy_templates import Policy_Template from f5.sdk_exception import UnsupportedOperation from six import iterkeys @pytest.fixture def FakePolicyTemplate(): fake_asm = mock.MagicMock() fake_tmpl = Policy_Template(fake_asm) fake_tmpl._meta_data['bigip'].tmos_version = '11.6.0' return fake_tmpl class TestPolicyTemplates(object): def test_create_raises(self, FakePolicyTemplate): with pytest.raises(UnsupportedOperation): FakePolicyTemplate.create() def test_modify_raises(self, FakePolicyTemplate): with pytest.raises(UnsupportedOperation): FakePolicyTemplate.modify() def test_delete_raises(self, FakePolicyTemplate): with pytest.raises(UnsupportedOperation): FakePolicyTemplate.delete() def test_collection(self, fakeicontrolsession): b = ManagementRoot('192.168.1.1', 'admin', 'admin') t = b.tm.asm.policy_templates_s test_meta = t._meta_data['attribute_registry'] test_meta2 = t._meta_data['allowed_lazy_attributes'] kind = 'tm:asm:policy-templates:policy-templatestate' assert kind in list(iterkeys(test_meta)) assert Policy_Template in test_meta2 assert t._meta_data['object_has_stats'] is False
35.109091
74
0.73796
ace7fc0079c399c0ac97278c1ec3a00679da578e
27,994
py
Python
core/domain/exp_jobs_one_off.py
ayushi0014/oppia
346ab6b84bfc3d0cab2ed0e647ad76f7a9acd399
[ "Apache-2.0" ]
1
2019-02-23T20:31:20.000Z
2019-02-23T20:31:20.000Z
core/domain/exp_jobs_one_off.py
ayushi0014/oppia
346ab6b84bfc3d0cab2ed0e647ad76f7a9acd399
[ "Apache-2.0" ]
35
2019-02-23T20:31:21.000Z
2019-08-19T12:32:13.000Z
core/domain/exp_jobs_one_off.py
ayushi0014/oppia
346ab6b84bfc3d0cab2ed0e647ad76f7a9acd399
[ "Apache-2.0" ]
1
2021-01-28T05:20:56.000Z
2021-01-28T05:20:56.000Z
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """One-off jobs for explorations.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import ast import datetime import logging import re from constants import constants from core import jobs from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import fs_domain from core.domain import html_validation_service from core.domain import rights_domain from core.domain import rights_manager from core.platform import models import feconf import python_utils import utils ( base_models, exp_models, feedback_models, improvements_models, skill_models, stats_models, story_models, ) = models.Registry.import_models([ models.NAMES.base_model, models.NAMES.exploration, models.NAMES.feedback, models.NAMES.improvements, models.NAMES.skill, models.NAMES.statistics, models.NAMES.story, ]) class RegenerateStringPropertyIndexOneOffJob( jobs.BaseMapReduceOneOffJobManager): """One-off job for regenerating the index of models changed to use an indexed StringProperty. Cloud NDB dropped support for StringProperty(indexed=False) and TextProperty(indexed=True). Therefore, to prepare for the migration to Cloud NDB, we need to regenerate the indexes for every model that has been changed in this way. 
https://cloud.google.com/appengine/docs/standard/python/datastore/indexes#unindexed-properties: > changing a property from unindexed to indexed does not affect any existing > entities that may have been created before the change. Queries filtering > on the property will not return such existing entities, because the > entities weren't written to the query's index when they were created. To > make the entities accessible by future queries, you must rewrite them to > Datastore so that they will be entered in the appropriate indexes. That > is, you must do the following for each such existing entity: > 1. Retrieve (get) the entity from Datastore. > 2. Write (put) the entity back to Datastore. """ @classmethod def entity_classes_to_map_over(cls): return [ exp_models.ExplorationModel, feedback_models.GeneralFeedbackMessageModel, improvements_models.TaskEntryModel, skill_models.SkillModel, stats_models.ExplorationAnnotationsModel, story_models.StoryModel, story_models.StorySummaryModel, ] @staticmethod def map(model): model_kind = type(model).__name__ if isinstance(model, base_models.VersionedModel): # Change the method resolution order of model to use BaseModel's # implementation of `put`. model = super(base_models.VersionedModel, model) model.update_timestamps(update_last_updated_time=False) model.put() yield (model_kind, 1) @staticmethod def reduce(key, counts): yield (key, len(counts)) class ExplorationFirstPublishedOneOffJob(jobs.BaseMapReduceOneOffJobManager): """One-off job that finds first published time in milliseconds for all explorations. 
""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationRightsSnapshotContentModel] @staticmethod def map(item): if item.content['status'] == rights_domain.ACTIVITY_STATUS_PUBLIC: yield ( item.get_unversioned_instance_id(), utils.get_time_in_millisecs(item.created_on)) @staticmethod def reduce(exp_id, stringified_commit_times_msecs): exploration_rights = rights_manager.get_exploration_rights( exp_id, strict=False) if exploration_rights is None: return commit_times_msecs = [ ast.literal_eval(commit_time_string) for commit_time_string in stringified_commit_times_msecs] first_published_msec = min(commit_times_msecs) rights_manager.update_activity_first_published_msec( constants.ACTIVITY_TYPE_EXPLORATION, exp_id, first_published_msec) class ExplorationValidityJobManager(jobs.BaseMapReduceOneOffJobManager): """Job that checks that all explorations have appropriate validation statuses. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return exploration = exp_fetchers.get_exploration_from_model(item) exp_rights = rights_manager.get_exploration_rights(item.id) try: if exp_rights.status == rights_domain.ACTIVITY_STATUS_PRIVATE: exploration.validate() else: exploration.validate(strict=True) except utils.ValidationError as e: yield (item.id, python_utils.convert_to_bytes(e)) @staticmethod def reduce(key, values): yield (key, values) class ExplorationMigrationAuditJob(jobs.BaseMapReduceOneOffJobManager): """A reusable one-off job for testing exploration migration from any exploration schema version to the latest. This job runs the state migration, but does not commit the new exploration to the store. 
""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @classmethod def enqueue(cls, job_id, additional_job_params=None): super(ExplorationMigrationAuditJob, cls).enqueue( job_id, shard_count=64) @staticmethod def map(item): if item.deleted: return current_state_schema_version = feconf.CURRENT_STATE_SCHEMA_VERSION states_schema_version = item.states_schema_version versioned_exploration_states = { 'states_schema_version': states_schema_version, 'states': item.states } while states_schema_version < current_state_schema_version: try: exp_domain.Exploration.update_states_from_model( versioned_exploration_states, states_schema_version, item.id) states_schema_version += 1 except Exception as e: error_message = ( 'Exploration %s failed migration to states v%s: %s' % (item.id, states_schema_version + 1, e)) logging.exception(error_message) yield ('MIGRATION_ERROR', error_message.encode('utf-8')) break if states_schema_version == current_state_schema_version: yield ('SUCCESS', 1) @staticmethod def reduce(key, values): if key == 'SUCCESS': yield (key, len(values)) else: yield (key, values) class ExplorationMigrationJobManager(jobs.BaseMapReduceOneOffJobManager): """A reusable one-time job that may be used to migrate exploration schema versions. This job will load all existing explorations from the data store and immediately store them back into the data store. The loading process of an exploration in exp_services automatically performs schema updating. This job persists that conversion work, keeping explorations up-to-date and improving the load time of new explorations. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @classmethod def enqueue(cls, job_id, additional_job_params=None): super(ExplorationMigrationJobManager, cls).enqueue( job_id, shard_count=64) @staticmethod def map(item): if item.deleted: return # Do not upgrade explorations that fail non-strict validation. 
old_exploration = exp_fetchers.get_exploration_by_id(item.id) try: old_exploration.validate() except Exception as e: logging.error( 'Exploration %s failed non-strict validation: %s' % (item.id, e)) return # If the exploration model being stored in the datastore is not the # most up-to-date states schema version, then update it. if (item.states_schema_version != feconf.CURRENT_STATE_SCHEMA_VERSION): # Note: update_exploration does not need to apply a change list in # order to perform a migration. See the related comment in # exp_services.apply_change_list for more information. # # Note: from_version and to_version really should be int, but left # as str to conform with legacy data. commit_cmds = [exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION, 'from_version': python_utils.UNICODE( item.states_schema_version), 'to_version': python_utils.UNICODE( feconf.CURRENT_STATE_SCHEMA_VERSION) })] exp_services.update_exploration( feconf.MIGRATION_BOT_USERNAME, item.id, commit_cmds, 'Update exploration states from schema version %d to %d.' % ( item.states_schema_version, feconf.CURRENT_STATE_SCHEMA_VERSION)) yield ('SUCCESS', item.id) @staticmethod def reduce(key, values): yield (key, len(values)) class ExplorationMathSvgFilenameValidationOneOffJob( jobs.BaseMapReduceOneOffJobManager): """Job that checks the html content of an exploration and validates the svg_filename fields in each math rich-text components. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return exploration = exp_fetchers.get_exploration_from_model(item) invalid_tags_info_in_exp = [] for state_name, state in exploration.states.items(): html_string = ''.join(state.get_all_html_content_strings()) error_list = ( html_validation_service. 
validate_svg_filenames_in_math_rich_text( feconf.ENTITY_TYPE_EXPLORATION, item.id, html_string)) if len(error_list) > 0: invalid_tags_info_in_state = { 'state_name': state_name, 'error_list': error_list, 'no_of_invalid_tags': len(error_list) } invalid_tags_info_in_exp.append(invalid_tags_info_in_state) if len(invalid_tags_info_in_exp) > 0: yield ('Found invalid tags', (item.id, invalid_tags_info_in_exp)) @staticmethod def reduce(key, values): final_values = [ast.literal_eval(value) for value in values] no_of_invalid_tags = 0 invalid_tags_info = {} for exp_id, invalid_tags_info_in_exp in final_values: invalid_tags_info[exp_id] = [] for value in invalid_tags_info_in_exp: no_of_invalid_tags += value['no_of_invalid_tags'] del value['no_of_invalid_tags'] invalid_tags_info[exp_id].append(value) final_value_dict = { 'no_of_explorations_with_no_svgs': len(final_values), 'no_of_invalid_tags': no_of_invalid_tags, } yield ('Overall result.', final_value_dict) yield ('Detailed information on invalid tags. ', invalid_tags_info) class ExplorationRteMathContentValidationOneOffJob( jobs.BaseMapReduceOneOffJobManager): """Job that checks the html content of an exploration and validates the Math content object for each math rich-text components. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return exploration = exp_fetchers.get_exploration_from_model(item) invalid_tags_info_in_exp = [] for state_name, state in exploration.states.items(): html_string = ''.join(state.get_all_html_content_strings()) error_list = ( html_validation_service. 
validate_math_content_attribute_in_html(html_string)) if len(error_list) > 0: invalid_tags_info_in_state = { 'state_name': state_name, 'error_list': error_list, 'no_of_invalid_tags': len(error_list) } invalid_tags_info_in_exp.append(invalid_tags_info_in_state) if len(invalid_tags_info_in_exp) > 0: yield ('Found invalid tags', (item.id, invalid_tags_info_in_exp)) @staticmethod def reduce(key, values): final_values = [ast.literal_eval(value) for value in values] no_of_invalid_tags = 0 invalid_tags_info = {} for exp_id, invalid_tags_info_in_exp in final_values: invalid_tags_info[exp_id] = [] for value in invalid_tags_info_in_exp: no_of_invalid_tags += value['no_of_invalid_tags'] del value['no_of_invalid_tags'] invalid_tags_info[exp_id].append(value) final_value_dict = { 'no_of_explorations_with_no_svgs': len(final_values), 'no_of_invalid_tags': no_of_invalid_tags, } yield ('Overall result.', final_value_dict) yield ('Detailed information on invalid tags.', invalid_tags_info) class ViewableExplorationsAuditJob(jobs.BaseMapReduceOneOffJobManager): """Job that outputs a list of private explorations which are viewable.""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return exploration_rights = rights_manager.get_exploration_rights( item.id, strict=False) if exploration_rights is None: return if (exploration_rights.status == constants.ACTIVITY_STATUS_PRIVATE and exploration_rights.viewable_if_private): yield (item.id, item.title.encode('utf-8')) @staticmethod def reduce(key, values): yield (key, values) class HintsAuditOneOffJob(jobs.BaseMapReduceOneOffJobManager): """Job that tabulates the number of hints used by each state of an exploration. 
""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return exploration = exp_fetchers.get_exploration_from_model(item) for state_name, state in exploration.states.items(): hints_length = len(state.interaction.hints) if hints_length > 0: exp_and_state_key = '%s %s' % ( item.id, state_name.encode('utf-8')) yield (python_utils.UNICODE(hints_length), exp_and_state_key) @staticmethod def reduce(key, values): yield (key, values) class ExplorationContentValidationJobForCKEditor( jobs.BaseMapReduceOneOffJobManager): """Job that checks the html content of an exploration and validates it for CKEditor. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return try: exploration = exp_fetchers.get_exploration_from_model(item) except Exception as e: yield ( 'Error %s when loading exploration' % python_utils.convert_to_bytes(e), [item.id]) return html_list = exploration.get_all_html_content_strings() err_dict = html_validation_service.validate_rte_format( html_list, feconf.RTE_FORMAT_CKEDITOR) for key in err_dict: if err_dict[key]: yield ('%s Exp Id: %s' % (key, item.id), err_dict[key]) @staticmethod def reduce(key, values): final_values = [ast.literal_eval(value) for value in values] # Combine all values from multiple lists into a single list # for that error type. output_values = list(set().union(*final_values)) exp_id_index = key.find('Exp Id:') if exp_id_index == -1: yield (key, output_values) else: output_values.append(key[exp_id_index:]) yield (key[:exp_id_index - 1], output_values) class RTECustomizationArgsValidationOneOffJob( jobs.BaseMapReduceOneOffJobManager): """One-off job for validating all the customizations arguments of Rich Text Components. 
""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return err_dict = {} try: exploration = exp_fetchers.get_exploration_from_model(item) except Exception as e: yield ( 'Error %s when loading exploration' % python_utils.UNICODE(e), [item.id]) return html_list = exploration.get_all_html_content_strings() err_dict = html_validation_service.validate_customization_args( html_list) for key in err_dict: err_value_with_exp_id = err_dict[key] err_value_with_exp_id.append('Exp ID: %s' % item.id) yield (key, err_value_with_exp_id) @staticmethod def reduce(key, values): final_values = [ast.literal_eval(value) for value in values] flattened_values = [ item for sublist in final_values for item in sublist] # Errors produced while loading exploration only contain exploration id # in error message, so no further formatting is required. For errors # from validation the output is in format [err1, expid1, err2, expid2]. # So, we further format it as [(expid1, err1), (expid2, err2)]. if 'loading exploration' in key: yield (key, flattened_values) return output_values = [] index = 0 while index < len(flattened_values): # flattened_values[index] = error message. # flattened_values[index + 1] = exp id in which error message # is present. output_values.append(( flattened_values[index + 1], flattened_values[index])) index += 2 output_values.sort() yield (key, output_values) class XmlnsAttributeInExplorationMathSvgImagesAuditJob( jobs.BaseMapReduceOneOffJobManager): """One-off job to audit math SVGs on the server that do not have xmlns attribute. 
""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return fs = fs_domain.AbstractFileSystem(fs_domain.GcsFileSystem( feconf.ENTITY_TYPE_EXPLORATION, item.id)) filepaths = fs.listdir('image') for filepath in filepaths: filename = filepath.split('/')[-1] if not re.match(constants.MATH_SVG_FILENAME_REGEX, filename): continue old_svg_image = fs.get(filepath) xmlns_attribute_is_present = ( html_validation_service.does_svg_tag_contains_xmlns_attribute( old_svg_image)) if not xmlns_attribute_is_present: yield (item.id, filename) @staticmethod def reduce(key, values): yield (key, values) class RemoveTranslatorIdsOneOffJob(jobs.BaseMapReduceOneOffJobManager): """Job that deletes the translator_ids from the ExpSummaryModel. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExpSummaryModel] @staticmethod def map(exp_summary_model): # This is the only way to remove the field from the model, # see https://stackoverflow.com/a/15116016/3688189 and # https://stackoverflow.com/a/12701172/3688189. if 'translator_ids' in exp_summary_model._properties: # pylint: disable=protected-access del exp_summary_model._properties['translator_ids'] # pylint: disable=protected-access if 'translator_ids' in exp_summary_model._values: # pylint: disable=protected-access del exp_summary_model._values['translator_ids'] # pylint: disable=protected-access exp_summary_model.update_timestamps(update_last_updated_time=False) exp_summary_model.put() yield ('SUCCESS_REMOVED - ExpSummaryModel', exp_summary_model.id) else: yield ( 'SUCCESS_ALREADY_REMOVED - ExpSummaryModel', exp_summary_model.id) @staticmethod def reduce(key, values): """Implements the reduce function for this job.""" yield (key, len(values)) def regenerate_exp_commit_log_model(exp_model, version): """Helper function to regenerate a commit log model for an exploration model. 
NOTE TO DEVELOPERS: Do not delete this function until issue #10808 is fixed. Args: exp_model: ExplorationModel. The exploration model for which commit log model is to be generated. version: int. The commit log version to be generated. Returns: ExplorationCommitLogEntryModel. The regenerated commit log model. """ metadata_model = ( exp_models.ExplorationSnapshotMetadataModel.get_by_id( '%s-%s' % (exp_model.id, version))) required_rights_model = exp_models.ExplorationRightsModel.get( exp_model.id, strict=True, version=1) for rights_version in python_utils.RANGE(2, version + 1): rights_model = exp_models.ExplorationRightsModel.get( exp_model.id, strict=False, version=rights_version) if rights_model is None: break if rights_model.created_on <= metadata_model.created_on: required_rights_model = rights_model else: break commit_log_model = ( exp_models.ExplorationCommitLogEntryModel.create( exp_model.id, version, metadata_model.committer_id, metadata_model.commit_type, metadata_model.commit_message, metadata_model.commit_cmds, required_rights_model.status, required_rights_model.community_owned)) commit_log_model.exploration_id = exp_model.id commit_log_model.created_on = metadata_model.created_on commit_log_model.last_updated = metadata_model.last_updated return commit_log_model class RegenerateMissingExpCommitLogModels(jobs.BaseMapReduceOneOffJobManager): """Job that regenerates missing commit log models for an exploration. NOTE TO DEVELOPERS: Do not delete this job until issue #10808 is fixed. 
""" @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return for version in python_utils.RANGE(1, item.version + 1): commit_log_model = ( exp_models.ExplorationCommitLogEntryModel.get_by_id( 'exploration-%s-%s' % (item.id, version))) if commit_log_model is None: commit_log_model = regenerate_exp_commit_log_model( item, version) commit_log_model.update_timestamps( update_last_updated_time=False) commit_log_model.put() yield ( 'Regenerated Exploration Commit Log Model: version %s' % ( version), item.id) @staticmethod def reduce(key, values): yield (key, values) class ExpCommitLogModelRegenerationValidator( jobs.BaseMapReduceOneOffJobManager): """Job that validates the process of regeneration of commit log models for an exploration. NOTE TO DEVELOPERS: Do not delete this job until issue #10808 is fixed. """ @classmethod def entity_classes_to_map_over(cls): return [exp_models.ExplorationModel] @staticmethod def map(item): if item.deleted: return # This is done to ensure that all explorations are not checked and # a random sample of the explorations is checked. 
last_char_in_id = item.id[-1] if last_char_in_id < 'a' or last_char_in_id > 'j': return for version in python_utils.RANGE(1, item.version + 1): commit_log_model = ( exp_models.ExplorationCommitLogEntryModel.get_by_id( 'exploration-%s-%s' % (item.id, version))) if commit_log_model is None: continue regenerated_commit_log_model = regenerate_exp_commit_log_model( item, version) fields = [ 'user_id', 'commit_type', 'commit_message', 'commit_cmds', 'version', 'post_commit_status', 'post_commit_community_owned', 'post_commit_is_private', 'exploration_id' ] for field in fields: commit_model_field_val = getattr(commit_log_model, field) regenerated_commit_log_model_field_val = getattr( regenerated_commit_log_model, field) if commit_model_field_val != ( regenerated_commit_log_model_field_val): yield ( 'Mismatch between original model and regenerated model', '%s in original model: %s, in regenerated model: %s' % ( field, commit_model_field_val, regenerated_commit_log_model_field_val)) time_fields = ['created_on', 'last_updated'] for field in time_fields: commit_model_field_val = getattr(commit_log_model, field) regenerated_commit_log_model_field_val = getattr( regenerated_commit_log_model, field) max_allowed_val = regenerated_commit_log_model_field_val + ( datetime.timedelta(minutes=1)) min_allowed_val = regenerated_commit_log_model_field_val - ( datetime.timedelta(minutes=1)) if commit_model_field_val > max_allowed_val or ( commit_model_field_val < min_allowed_val): yield ( 'Mismatch between original model and regenerated model', '%s in original model: %s, in regenerated model: %s' % ( field, commit_model_field_val, regenerated_commit_log_model_field_val)) @staticmethod def reduce(key, values): yield (key, values)
36.785808
99
0.650532
ace7fc24f6afa5468439057d06aa7fbe63bcfabe
154
py
Python
assignment/assignment3/hw3a_template.py
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
09c17e42c2e173a6ab10339f08fbc1505db8ea56
[ "MIT" ]
1
2021-05-13T13:10:42.000Z
2021-05-13T13:10:42.000Z
assignment/assignment3/hw3a_template.py
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
09c17e42c2e173a6ab10339f08fbc1505db8ea56
[ "MIT" ]
null
null
null
assignment/assignment3/hw3a_template.py
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
09c17e42c2e173a6ab10339f08fbc1505db8ea56
[ "MIT" ]
null
null
null
#Header information def RegularLattice(nodes, k): #function body #statements to plot the lattice if __name__ == '__main__': RegularLattice(50,3)
11.846154
32
0.733766
ace7fd16750bc479900b206b6029a884b62a1798
1,057
py
Python
operator_api/ledger/migrations/0045_tokenpair.py
liquidity-network/nocust-hub
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
[ "MIT" ]
1
2021-08-04T06:09:46.000Z
2021-08-04T06:09:46.000Z
operator_api/ledger/migrations/0045_tokenpair.py
liquidity-network/nocust-hub
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
[ "MIT" ]
8
2020-11-01T19:48:21.000Z
2022-02-10T14:12:25.000Z
operator_api/ledger/migrations/0045_tokenpair.py
liquidity-network/nocust-hub
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
[ "MIT" ]
3
2020-11-01T15:59:56.000Z
2021-09-16T07:18:18.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2019-01-21 14:57 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ledger', '0044_auto_20180930_1027'), ] operations = [ migrations.CreateModel( name='TokenPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('conduit', models.CharField(blank=True, max_length=40, unique=True)), ('token_from', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_pairs', to='ledger.Token')), ('token_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_pairs', to='ledger.Token')), ], ), ]
36.448276
96
0.558184
ace7fdefdf813fb4995b99309a1ea8526ff805dc
3,086
py
Python
Semester 2/Programs/PyShop/pyshop/settings.py
sufiyaanusmani/FAST-NUCES
885fa821fada467884fe37fd75a3ced5af3d9e47
[ "MIT" ]
null
null
null
Semester 2/Programs/PyShop/pyshop/settings.py
sufiyaanusmani/FAST-NUCES
885fa821fada467884fe37fd75a3ced5af3d9e47
[ "MIT" ]
null
null
null
Semester 2/Programs/PyShop/pyshop/settings.py
sufiyaanusmani/FAST-NUCES
885fa821fada467884fe37fd75a3ced5af3d9e47
[ "MIT" ]
null
null
null
""" Django settings for pyshop project. Generated by 'django-admin startproject' using Django 2.1. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '%l3rl5i-*rvrqw+5gl(m2$jz$#=7#wwb%6&%do$ks5e!#707&+' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'pyshop.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'pyshop.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 
os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/'
25.504132
91
0.694102
ace7fe77c78dfd782f576a855b16c790bec03b1f
1,625
py
Python
exchange_sockets/exchange_websocket.py
SpiralDevelopment/crypto-hft-data
205f01fd555eab4f636ffbb701dfcde53d27becc
[ "MIT" ]
31
2020-07-20T14:11:39.000Z
2022-03-17T03:18:33.000Z
exchange_sockets/exchange_websocket.py
SpiralDevelopment/crypto-hft-data
205f01fd555eab4f636ffbb701dfcde53d27becc
[ "MIT" ]
null
null
null
exchange_sockets/exchange_websocket.py
SpiralDevelopment/crypto-hft-data
205f01fd555eab4f636ffbb701dfcde53d27becc
[ "MIT" ]
11
2020-07-20T14:11:52.000Z
2022-03-14T04:20:19.000Z
from singletones.custom_logger import MyLogger from managers.file_manager import FileManager import platform from time import time logger = MyLogger() class ExchangeWebSocket(object): def __init__(self, exchange, pairs_n_streams): if pairs_n_streams: self.pairs_n_streams = pairs_n_streams else: raise Exception('pairs and streams/channels are empty') self.exited = False self.on_error = False self.ws = None self.last_msg_time = 0 self.exchange = exchange.lower() self.depth_last_update = {} self.file_manager = FileManager() self.file_manager.create_dirs(self.exchange, self.pairs_n_streams) self.possible_streams = [] self.node = platform.node() self.max_delay = 90 def start_multiple_websocket(self, init_streams=True): logger.info('Starting multiple websocket for %s', self.exchange) if init_streams: self.init_streams() self.on_error = False self.last_msg_time = int(time()) def get_possible_streams(self): if len(self.possible_streams) > 0: return self.possible_streams else: raise NotImplementedError('Possible streams are not implemented') def has_stream(self, streams): for stream in streams.split(','): if stream not in self.get_possible_streams(): return False return True def init_streams(self): raise NotImplementedError('init_streams') def close_socket(self): raise NotImplementedError('close_socket')
29.545455
77
0.654769
ace7fedfb726a8d371159b8738670f8182403057
2,579
py
Python
ble2mqtt/devices/presence.py
snickerjp/ble2mqtt
e45ca8ac20ec592f05cc035b5abc2b12ca8b0fb7
[ "MIT" ]
33
2020-11-10T07:04:12.000Z
2022-01-29T16:19:46.000Z
ble2mqtt/devices/presence.py
snickerjp/ble2mqtt
e45ca8ac20ec592f05cc035b5abc2b12ca8b0fb7
[ "MIT" ]
27
2020-12-27T08:51:24.000Z
2022-03-21T11:06:24.000Z
ble2mqtt/devices/presence.py
snickerjp/ble2mqtt
e45ca8ac20ec592f05cc035b5abc2b12ca8b0fb7
[ "MIT" ]
12
2021-01-06T08:13:14.000Z
2022-03-21T16:38:01.000Z
import logging import typing as ty from dataclasses import dataclass from datetime import datetime, timedelta from bleak.backends.device import BLEDevice from .base import BINARY_SENSOR_DOMAIN, DEVICE_TRACKER_DOMAIN, Sensor _LOGGER = logging.getLogger(__name__) @dataclass class SensorState: presence: bool = False last_check: ty.Optional[datetime] = None @property def device_tracker(self): return 'home' if self.presence else 'not_home' class Presence(Sensor): NAME = 'presence' SENSOR_CLASS = SensorState SUPPORT_PASSIVE = True SUPPORT_ACTIVE = False MANUFACTURER = 'Generic' THRESHOLD = 300 # if no activity more than THRESHOLD, consider presence=OFF PASSIVE_SLEEP_INTERVAL = 1 SEND_DATA_PERIOD = 60 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) cls = self.SENSOR_CLASS self._state: cls = None self._threshold = int(kwargs.get('threshold', self.THRESHOLD)) @property def entities(self): return { BINARY_SENSOR_DOMAIN: [ { 'name': 'presence', 'device_class': 'presence', }, ], DEVICE_TRACKER_DOMAIN: [ { 'name': 'device_tracker', }, ], } def handle_advert(self, scanned_device: BLEDevice, adv_data): self._state = self.SENSOR_CLASS( presence=True, last_check=datetime.now(), ) _LOGGER.debug( f'Advert received for {self}, current state: {self._state}', ) async def handle_passive(self, *args, **kwargs): self.last_sent_value = None self.last_sent_time = None await super().handle_passive(*args, **kwargs) async def do_passive_loop(self, publish_topic): if self._state.presence and \ self._state.last_check + \ timedelta(seconds=self._threshold) < datetime.now(): self._state.presence = False # send if changed or update value every SEND_DATA_PERIOD secs if self.last_sent_value is None or \ self.last_sent_value != self._state.presence or \ (datetime.now() - self.last_sent_time).seconds > \ self.SEND_DATA_PERIOD: _LOGGER.debug(f'Try publish {self._state}') await self._notify_state(publish_topic) self.last_sent_value = self._state.presence self.last_sent_time = 
datetime.now()
30.702381
80
0.606437
ace7ff59b397196a4721fe07cb78ebde9835c62e
3,534
py
Python
qiskit/transpiler/passes/basis/unroll_custom_definitions.py
gadial/qiskit-terra
0fc83f44a6e80969875c738b2cee7bc33223e45f
[ "Apache-2.0" ]
1
2021-10-05T11:56:53.000Z
2021-10-05T11:56:53.000Z
qiskit/transpiler/passes/basis/unroll_custom_definitions.py
gadial/qiskit-terra
0fc83f44a6e80969875c738b2cee7bc33223e45f
[ "Apache-2.0" ]
24
2021-01-27T08:20:27.000Z
2021-07-06T09:42:28.000Z
qiskit/transpiler/passes/basis/unroll_custom_definitions.py
gadial/qiskit-terra
0fc83f44a6e80969875c738b2cee7bc33223e45f
[ "Apache-2.0" ]
4
2021-10-05T12:07:27.000Z
2022-01-28T18:37:28.000Z
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Unrolls instructions with custom definitions.""" from qiskit.exceptions import QiskitError from qiskit.transpiler.basepasses import TransformationPass from qiskit.circuit import ControlledGate from qiskit.converters.circuit_to_dag import circuit_to_dag class UnrollCustomDefinitions(TransformationPass): """Unrolls instructions with custom definitions.""" def __init__(self, equivalence_library, basis_gates): """Unrolls instructions with custom definitions. Args: equivalence_library (EquivalenceLibrary): The equivalence library which will be used by the BasisTranslator pass. (Instructions in this library will not be unrolled by this pass.) basis_gates (list[str]): Target basis names to unroll to, e.g. `['u3', 'cx']`. """ super().__init__() self._equiv_lib = equivalence_library self._basis_gates = basis_gates def run(self, dag): """Run the UnrollCustomDefinitions pass on `dag`. Args: dag (DAGCircuit): input dag Raises: QiskitError: if unable to unroll given the basis due to undefined decomposition rules (such as a bad basis) or excessive recursion. 
Returns: DAGCircuit: output unrolled dag """ if self._basis_gates is None: return dag basic_insts = {'measure', 'reset', 'barrier', 'snapshot', 'delay'} device_insts = basic_insts | set(self._basis_gates) for node in dag.op_nodes(): if node.op._directive: continue if dag.has_calibration_for(node): continue if node.name in device_insts or self._equiv_lib.has_entry(node.op): if isinstance(node.op, ControlledGate) and node.op._open_ctrl: pass else: continue try: rule = node.op.definition.data except TypeError as err: raise QiskitError(f'Error decomposing node {node.name}: {err}') from err except AttributeError: # definition is None rule = None if not rule: if rule == []: dag.remove_op_node(node) continue # opaque node raise QiskitError("Cannot unroll the circuit to the given basis, %s. " "Instruction %s not found in equivalence library " "and no rule found to expand." % (str(self._basis_gates), node.op.name)) decomposition = circuit_to_dag(node.op.definition) unrolled_dag = UnrollCustomDefinitions(self._equiv_lib, self._basis_gates).run( decomposition) dag.substitute_node_with_dag(node, unrolled_dag) return dag
36.43299
90
0.59451
ace7ffdf2777556927e9e0167728ccb76073aef1
739
py
Python
tests/unit/providers/async/test_factory_aggregate_py36.py
YelloFam/python-dependency-injector
541131e33858ee1b8b5a7590d2bb9f929740ea1e
[ "BSD-3-Clause" ]
null
null
null
tests/unit/providers/async/test_factory_aggregate_py36.py
YelloFam/python-dependency-injector
541131e33858ee1b8b5a7590d2bb9f929740ea1e
[ "BSD-3-Clause" ]
null
null
null
tests/unit/providers/async/test_factory_aggregate_py36.py
YelloFam/python-dependency-injector
541131e33858ee1b8b5a7590d2bb9f929740ea1e
[ "BSD-3-Clause" ]
null
null
null
"""FactoryAggregate provider async mode tests.""" from dependency_injector import providers from pytest import mark @mark.asyncio async def test_async_mode(): object1 = object() object2 = object() async def _get_object1(): return object1 def _get_object2(): return object2 provider = providers.FactoryAggregate( object1=providers.Factory(_get_object1), object2=providers.Factory(_get_object2), ) assert provider.is_async_mode_undefined() is True created_object1 = await provider("object1") assert created_object1 is object1 assert provider.is_async_mode_enabled() is True created_object2 = await provider("object2") assert created_object2 is object2
23.83871
53
0.722598
ace7ffe63b2d44a132d6708df871f43e9111c509
5,197
py
Python
tzlocal/tests.py
jean/tzlocal
37b49de83103f81c5e3f414eacf265972b85f9af
[ "MIT" ]
9
2018-06-10T20:32:10.000Z
2021-11-21T03:54:41.000Z
tzlocal/tests.py
jean/tzlocal
37b49de83103f81c5e3f414eacf265972b85f9af
[ "MIT" ]
479
2019-07-30T11:47:46.000Z
2021-08-03T10:43:11.000Z
tzlocal/tests.py
jean/tzlocal
37b49de83103f81c5e3f414eacf265972b85f9af
[ "MIT" ]
21
2019-03-11T04:25:23.000Z
2022-02-03T08:54:33.000Z
import mock import os import pytz import sys import tzlocal.unix import unittest from datetime import datetime class TzLocalTests(unittest.TestCase): def setUp(self): if 'TZ' in os.environ: del os.environ['TZ'] self.path = os.path.split(__file__)[0] def test_env(self): tz_harare = tzlocal.unix._tz_from_env(':Africa/Harare') self.assertEqual(tz_harare.zone, 'Africa/Harare') # Some Unices allow this as well, so we must allow it: tz_harare = tzlocal.unix._tz_from_env('Africa/Harare') self.assertEqual(tz_harare.zone, 'Africa/Harare') tz_local = tzlocal.unix._tz_from_env(':' + os.path.join(self.path, 'test_data', 'Harare')) self.assertEqual(tz_local.zone, 'local') # Make sure the local timezone is the same as the Harare one above. # We test this with a past date, so that we don't run into future changes # of the Harare timezone. dt = datetime(2012, 1, 1, 5) self.assertEqual(tz_harare.localize(dt), tz_local.localize(dt)) # Non-zoneinfo timezones are not supported in the TZ environment. self.assertRaises(pytz.UnknownTimeZoneError, tzlocal.unix._tz_from_env, 'GMT+03:00') # Test the _try function os.environ['TZ'] = 'Africa/Harare' tz_harare = tzlocal.unix._try_tz_from_env() self.assertEqual(tz_harare.zone, 'Africa/Harare') # With a zone that doesn't exist os.environ['TZ'] = 'Just Nonsense' tz_harare = tzlocal.unix._try_tz_from_env() self.assertIsNone(tz_harare) def test_timezone(self): # Most versions of Ubuntu tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'timezone')) self.assertEqual(tz.zone, 'Africa/Harare') def test_zone_setting(self): # A ZONE setting in /etc/sysconfig/clock, f ex CentOS tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'zone_setting')) self.assertEqual(tz.zone, 'Africa/Harare') def test_timezone_setting(self): # A ZONE setting in /etc/conf.d/clock, f ex Gentoo tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'timezone_setting')) self.assertEqual(tz.zone, 
'Africa/Harare') def test_symlink_localtime(self): # A ZONE setting in the target path of a symbolic linked localtime, f ex systemd distributions tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'symlink_localtime')) self.assertEqual(tz.zone, 'Africa/Harare') def test_vardbzoneinfo_setting(self): # A ZONE setting in /etc/conf.d/clock, f ex Gentoo tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'vardbzoneinfo')) self.assertEqual(tz.zone, 'Africa/Harare') def test_only_localtime(self): tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'localtime')) self.assertEqual(tz.zone, 'local') dt = datetime(2012, 1, 1, 5) self.assertEqual(pytz.timezone('Africa/Harare').localize(dt), tz.localize(dt)) def test_get_reload(self): os.environ['TZ'] = 'Africa/Harare' tz_harare = tzlocal.unix.get_localzone() self.assertEqual(tz_harare.zone, 'Africa/Harare') # Changing the TZ makes no difference, because it's cached os.environ['TZ'] = 'Africa/Johannesburg' tz_harare = tzlocal.unix.get_localzone() self.assertEqual(tz_harare.zone, 'Africa/Harare') # So we reload it tz_harare = tzlocal.unix.reload_localzone() self.assertEqual(tz_harare.zone, 'Africa/Johannesburg') def test_fail(self): with self.assertRaises(pytz.exceptions.UnknownTimeZoneError): tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data')) if sys.platform == 'win32': import tzlocal.win32 class TzWin32Tests(unittest.TestCase): def test_win32(self): tzlocal.win32.get_localzone() else: class TzWin32Tests(unittest.TestCase): def test_win32_on_unix(self): # Yes, winreg is all mocked out, but this test means we at least # catch syntax errors, etc. 
winreg = mock.MagicMock() winreg.OpenKey = mock.MagicMock() winreg.OpenKey.close = mock.MagicMock() winreg.QueryInfoKey = mock.MagicMock(return_value=(1, 1)) winreg.EnumValue = mock.MagicMock( return_value=('TimeZoneKeyName','Belarus Standard Time')) winreg.EnumKey = mock.Mock(return_value='Bahia Standard Time') sys.modules['winreg'] = winreg import tzlocal.win32 tz = tzlocal.win32.get_localzone() self.assertEqual(tz.zone, 'Europe/Minsk') tzlocal.win32.valuestodict = mock.Mock(return_value={ 'StandardName': 'Mocked Standard Time', 'Std': 'Mocked Standard Time', }) tz = tzlocal.win32.reload_localzone() self.assertEqual(tz.zone, 'America/Bahia') if __name__ == '__main__': unittest.main()
38.496296
105
0.651722
ace80076f0512494c0891d5b8ae137491a3f4ccd
1,536
py
Python
builders/specs/cli/PyCOMPSsCLIResources/setup.py
eflows4hpc/compss
c497f6d34722103c6c8f83ebc314b495573ce054
[ "Apache-2.0" ]
null
null
null
builders/specs/cli/PyCOMPSsCLIResources/setup.py
eflows4hpc/compss
c497f6d34722103c6c8f83ebc314b495573ce054
[ "Apache-2.0" ]
null
null
null
builders/specs/cli/PyCOMPSsCLIResources/setup.py
eflows4hpc/compss
c497f6d34722103c6c8f83ebc314b495573ce054
[ "Apache-2.0" ]
null
null
null
import setuptools setuptools.setup( # Metadata name="pycompss-cli", version=open("VERSION.txt").read().strip(), description="PyCOMPSs cli", long_description=open("README.rst").read(), long_description_content_type="text/x-rst", author="Workflows and Distributed Computing Group (WDC) - Barcelona Supercomputing Center (BSC)", author_email="support-compss@bsc.es", url="https://compss.bsc.es", # License license="Apache 2.0", # Build include_package_data=True, packages=setuptools.find_packages(), classifiers=["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Operating System :: Unix", "Operating System :: MacOS", "Programming Language :: Python :: 3 :: Only", "Topic :: Software Development", "Topic :: Scientific/Engineering", "Topic :: System :: Distributed Computing", "Topic :: Utilities"], install_requires=["setuptools"], # Executable entry_points={ "console_scripts": [ "compss=pycompss_cli.cli.compss:main", "dislib=pycompss_cli.cli.dislib:main", "pycompss=pycompss_cli.cli.pycompss:main", ], } )
34.909091
101
0.58138
ace8014d1cc4715098f6ea582cc65766c86698d5
7,870
py
Python
tests/forms_tests/field_tests/test_urlfield.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
tests/forms_tests/field_tests/test_urlfield.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
tests/forms_tests/field_tests/test_urlfield.py
Yoann-Vie/esgi-hearthstone
115d03426c7e8e80d89883b78ac72114c29bed12
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
from django.forms import URLField, ValidationError from django.test import SimpleTestCase from . import FormFieldAssertionsMixin class URLFieldTest(FormFieldAssertionsMixin, SimpleTestCase): def test_urlfield_1(self): f = URLField() self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" required>') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean('') with self.assertRaisesMessage(ValidationError, "'This field is required.'"): f.clean(None) self.assertEqual('http://localhost', f.clean('http://localhost')) self.assertEqual('http://example.com', f.clean('http://example.com')) self.assertEqual('http://example.com.', f.clean('http://example.com.')) self.assertEqual('http://www.example.com', f.clean('http://www.example.com')) self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test')) self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com')) self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com')) self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10')) self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test')) with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('foo') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://example') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://example.') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('com.') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('.') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://.com') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://invalid-.com') with self.assertRaisesMessage(ValidationError, "'Enter 
a valid URL.'"): f.clean('http://-invalid.com') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://inv-.alid-.com') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://inv-.-alid.com') self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com')) self.assertEqual( 'http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah') ) self.assertEqual( 'http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804') ) with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('[a') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://[a') def test_url_regex_ticket11198(self): f = URLField() # hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://%s' % ("X" * 200,)) # a second test, to make sure the problem is really addressed, even on # domains that don't fail the domain label length check in the regex with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://%s' % ("X" * 60,)) def test_urlfield_2(self): f = URLField(required=False) self.assertEqual('', f.clean('')) self.assertEqual('', f.clean(None)) self.assertEqual('http://example.com', f.clean('http://example.com')) self.assertEqual('http://www.example.com', f.clean('http://www.example.com')) with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('foo') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://example') with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://example.') with 
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean('http://.com') def test_urlfield_5(self): f = URLField(min_length=15, max_length=20) self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" minlength="15" required>') with self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'"): f.clean('http://f.com') self.assertEqual('http://example.com', f.clean('http://example.com')) with self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'"): f.clean('http://abcdefghijklmnopqrstuvwxyz.com') def test_urlfield_6(self): f = URLField(required=False) self.assertEqual('http://example.com', f.clean('example.com')) self.assertEqual('', f.clean('')) self.assertEqual('https://example.com', f.clean('https://example.com')) def test_urlfield_7(self): f = URLField() self.assertEqual('http://example.com', f.clean('http://example.com')) self.assertEqual('http://example.com/test', f.clean('http://example.com/test')) self.assertEqual( 'http://example.com?some_param=some_value', f.clean('http://example.com?some_param=some_value') ) def test_urlfield_9(self): f = URLField() urls = ( 'http://עברית.idn.icann.org/', 'http://sãopaulo.com/', 'http://sãopaulo.com.br/', 'http://пример.испытание/', 'http://مثال.إختبار/', 'http://例子.测试/', 'http://例子.測試/', 'http://उदाहरण.परीक्षा/', 'http://例え.テスト/', 'http://مثال.آزمایشی/', 'http://실례.테스트/', 'http://العربية.idn.icann.org/', ) for url in urls: with self.subTest(url=url): # Valid IDN self.assertEqual(url, f.clean(url)) def test_urlfield_10(self): """URLField correctly validates IPv6 (#18779).""" f = URLField() urls = ( 'http://[12:34::3a53]/', 'http://[a34:9238::]:8080/', ) for url in urls: with self.subTest(url=url): self.assertEqual(url, f.clean(url)) def test_urlfield_not_string(self): f = URLField(required=False) with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"): f.clean(23) def 
test_urlfield_normalization(self): f = URLField() self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/') def test_urlfield_strip_on_none_value(self): f = URLField(required=False, empty_value=None) self.assertIsNone(f.clean(None)) def test_urlfield_unable_to_set_strip_kwarg(self): msg = "__init__() got multiple values for keyword argument 'strip'" with self.assertRaisesMessage(TypeError, msg): URLField(strip=False)
48.580247
119
0.592376
ace801c9234a60a1de72dd7ce0082afe200f316d
2,992
py
Python
reconbf/modules/test_firewall.py
fallenpegasus/reconbf
bfd15bef549f011a3de885c3267d4f718223b798
[ "Apache-2.0" ]
45
2016-08-12T21:37:25.000Z
2022-03-29T00:21:29.000Z
reconbf/modules/test_firewall.py
fallenpegasus/reconbf
bfd15bef549f011a3de885c3267d4f718223b798
[ "Apache-2.0" ]
20
2016-08-11T07:42:28.000Z
2016-09-09T13:33:47.000Z
reconbf/modules/test_firewall.py
fallenpegasus/reconbf
bfd15bef549f011a3de885c3267d4f718223b798
[ "Apache-2.0" ]
6
2016-08-25T06:31:38.000Z
2019-09-11T04:29:36.000Z
# Copyright 2016 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from reconbf.lib import test_class from reconbf.lib.result import Result from reconbf.lib.result import TestResult from reconbf.lib import utils import collections import subprocess def _list_rules(): try: output = subprocess.check_output(['iptables-save']) except (IOError, subprocess.CalledProcessError): # cannot get the list of rules for some reason return None lines = [line.strip() for line in output.splitlines() if not line.startswith(b'#')] return lines def _get_default_policy(rules): """Get the default policy for each table/chain.""" tables = collections.defaultdict(dict) current_table = None for rule in rules: if rule.startswith(b'*'): current_table = rule[1:] if rule.startswith(b':'): parts = rule[1:].split() tables[current_table][parts[0]] = parts[1] return tables @test_class.explanation(""" Protection name: Firewall whitelisting Check: Make sure that the firewall is configured to reject packets by default. Purpose: Creating whitelists is usually more secure than blacklists. Defaulting to dropping unknown traffic is a safer option in case of missed rules. 
""") def firewall_whitelisting(): if not utils.have_command('iptables-save'): return TestResult(Result.SKIP, "iptables not available") rules = _list_rules() if rules is None: return TestResult(Result.SKIP, "Cannot retrieve iptables rules") targets = _get_default_policy(rules) if b'filter' not in targets: return TestResult(Result.SKIP, "Cannot find the filter table") failures = [] filter_table = targets[b'filter'] if b'INPUT' not in filter_table: return TestResult(Result.SKIP, "Filter table doesn't include INPUT") if b'FORWARD' not in filter_table: return TestResult(Result.SKIP, "Filter table doesn't include FORWARD") if filter_table[b'INPUT'] == b'ACCEPT': failures.append('INPUT') if filter_table[b'FORWARD'] == b'ACCEPT': failures.append('FORWARD') if failures: return TestResult(Result.FAIL, "The following chains accept packets by " "default: %s" % ', '.join(failures)) else: return TestResult(Result.PASS, "Filter chains whitelist by default")
32.172043
78
0.684158
ace802bfc44b061d8721afd546482a038f783199
1,441
py
Python
tests/mr_spark_method_wordcount.py
ukwa/mrjob
091572e87bc24cc64be40278dd0f5c3617c98d4b
[ "Apache-2.0" ]
1,538
2015-01-02T10:22:17.000Z
2022-03-29T16:42:33.000Z
tests/mr_spark_method_wordcount.py
ukwa/mrjob
091572e87bc24cc64be40278dd0f5c3617c98d4b
[ "Apache-2.0" ]
1,027
2015-01-09T21:30:37.000Z
2022-02-26T18:21:42.000Z
tests/mr_spark_method_wordcount.py
ukwa/mrjob
091572e87bc24cc64be40278dd0f5c3617c98d4b
[ "Apache-2.0" ]
403
2015-01-06T15:49:44.000Z
2022-03-29T16:42:34.000Z
# Copyright 2016 Yelp # Copyright 2019 Yelp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from operator import add from mrjob.job import MRJob WORD_RE = re.compile(r"[\w']+") class MRSparkMethodWordcount(MRJob): # like MRSparkWordcount, except that we pass methods to Spark, # which means we have to be able to serialize *self*. def spark(self, input_path, output_path): # Spark may not be available where script is launched from pyspark import SparkContext sc = SparkContext(appName='mrjob Spark wordcount script') lines = sc.textFile(input_path) counts = ( lines.flatMap(self.line_to_words) .map(lambda word: (word, 1)) .reduceByKey(add)) counts.saveAsTextFile(output_path) sc.stop() def line_to_words(self, line): return WORD_RE.findall(line) if __name__ == '__main__': MRSparkMethodWordcount.run()
28.82
74
0.70229
ace802ddf6465d3ab13dd9b42ecaba2204431a5e
3,116
py
Python
src/powerbidedicated/azext_powerbidedicated/vendored_sdks/powerbidedicated/aio/_power_bi_dedicated.py
haroonf/azure-cli-extensions
61c044d34c224372f186934fa7c9313f1cd3a525
[ "MIT" ]
207
2017-11-29T06:59:41.000Z
2022-03-31T10:00:53.000Z
src/powerbidedicated/azext_powerbidedicated/vendored_sdks/powerbidedicated/aio/_power_bi_dedicated.py
haroonf/azure-cli-extensions
61c044d34c224372f186934fa7c9313f1cd3a525
[ "MIT" ]
4,061
2017-10-27T23:19:56.000Z
2022-03-31T23:18:30.000Z
src/powerbidedicated/azext_powerbidedicated/vendored_sdks/powerbidedicated/aio/_power_bi_dedicated.py
haroonf/azure-cli-extensions
61c044d34c224372f186934fa7c9313f1cd3a525
[ "MIT" ]
802
2017-10-11T17:36:26.000Z
2022-03-31T22:24:32.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from msrest import Deserializer, Serializer if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential from ._configuration import PowerBIDedicatedConfiguration from .operations import CapacitiesOperations from .operations import Operations from .. import models class PowerBIDedicated(object): """PowerBI Dedicated Web API provides a RESTful set of web services that enables users to create, retrieve, update, and delete Power BI dedicated capacities. :ivar capacities: CapacitiesOperations operations :vartype capacities: azure.mgmt.powerbidedicated.aio.operations.CapacitiesOperations :ivar operations: Operations operations :vartype operations: azure.mgmt.powerbidedicated.aio.operations.Operations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: A unique identifier for a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. :type subscription_id: str :param str base_url: Service URL :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
""" def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, base_url: Optional[str] = None, **kwargs: Any ) -> None: if not base_url: base_url = 'https://management.azure.com' self._config = PowerBIDedicatedConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._serialize.client_side_validation = False self._deserialize = Deserializer(client_models) self.capacities = CapacitiesOperations( self._client, self._config, self._serialize, self._deserialize) self.operations = Operations( self._client, self._config, self._serialize, self._deserialize) async def close(self) -> None: await self._client.close() async def __aenter__(self) -> "PowerBIDedicated": await self._client.__aenter__() return self async def __aexit__(self, *exc_details) -> None: await self._client.__aexit__(*exc_details)
44.514286
161
0.70475
ace80341686fc49faea23a90f1cf32cb52c82209
14,397
py
Python
onmt/translate/translation_server.py
oserikov/opennmt-inspection
0686a43aceb2620272ecda44ad08ba3bc9cb6e9b
[ "MIT" ]
2
2019-02-14T20:29:36.000Z
2020-03-11T13:22:08.000Z
onmt/translate/translation_server.py
oserikov/opennmt-inspection
0686a43aceb2620272ecda44ad08ba3bc9cb6e9b
[ "MIT" ]
6
2020-01-28T22:48:37.000Z
2020-08-17T16:09:03.000Z
onmt/translate/translation_server.py
oserikov/opennmt-inspection
0686a43aceb2620272ecda44ad08ba3bc9cb6e9b
[ "MIT" ]
1
2019-03-18T00:19:54.000Z
2019-03-18T00:19:54.000Z
#!/usr/bin/env python """ REST Translation server """ from __future__ import print_function import sys import os import argparse import time import json import threading import torch from onmt.translate.translator import build_translator import onmt.opts class Timer: def __init__(self, start=False): self.stime = -1 self.prev = -1 self.times = {} if start: self.start() def start(self): self.stime = time.time() self.prev = self.stime self.times = {} def tick(self, name=None, tot=False): t = time.time() if not tot: elapsed = t - self.prev else: elapsed = t - self.stime self.prev = t if name is not None: self.times[name] = elapsed return elapsed class ServerModelError(Exception): pass class TranslationServer(): def __init__(self): self.models = {} self.next_id = 0 def start(self, config_file): """Read the config file and pre-/load the models """ self.config_file = config_file with open(self.config_file) as f: self.confs = json.load(f) self.models_root = self.confs.get('models_root', './available_models') for i, conf in enumerate(self.confs["models"]): if "model" not in conf: raise ValueError("""Incorrect config file: missing 'model' parameter for model #%d""" % i) kwargs = {'timeout': conf.get('timeout', None), 'load': conf.get('load', None), 'tokenizer_opt': conf.get('tokenizer', None), 'on_timeout': conf.get('on_timeout', None), 'model_root': conf.get('model_root', self.models_root) } kwargs = {k: v for (k, v) in kwargs.items() if v is not None} model_id = conf.get("id", None) opt = conf["opt"] opt["model"] = conf["model"] self.preload_model(opt, model_id=model_id, **kwargs) def clone_model(self, model_id, opt, timeout=-1): """Clone a model `model_id`. Different options may be passed. 
If `opt` is None, it will use the same set of options """ if model_id in self.models: if opt is None: opt = self.models[model_id].user_opt opt["model"] = self.models[model_id].opt.model return self.load_model(opt, timeout) else: raise ServerModelError("No such model '%s'" % str(model_id)) def load_model(self, opt, model_id=None, **model_kwargs): """Loading a model given a set of options """ model_id = self.preload_model(opt, model_id=model_id, **model_kwargs) load_time = self.models[model_id].load_time return model_id, load_time def preload_model(self, opt, model_id=None, **model_kwargs): """Preloading the model: updating internal datastructure It will effectively load the model if `load` is set """ if model_id is not None: if model_id in self.models.keys(): raise ValueError("Model ID %d already exists" % model_id) else: model_id = self.next_id while model_id in self.models.keys(): model_id += 1 self.next_id = model_id + 1 print("Pre-loading model %d" % model_id) model = ServerModel(opt, model_id, **model_kwargs) self.models[model_id] = model return model_id def run(self, inputs): """Translate `inputs` We keep the same format as the Lua version i.e. [{"id": model_id, "src": "sequence to translate"},{ ...}] We use inputs[0]["id"] as the model id """ model_id = inputs[0].get("id", 0) if model_id in self.models and self.models[model_id] is not None: return self.models[model_id].run(inputs) else: print("Error No such model '%s'" % str(model_id)) raise ServerModelError("No such model '%s'" % str(model_id)) def unload_model(self, model_id): """Manually unload a model. 
It will free the memory and cancel the timer """ if model_id in self.models and self.models[model_id] is not None: self.models[model_id].unload() else: raise ServerModelError("No such model '%s'" % str(model_id)) def list_models(self): """Return the list of available models """ models = [] for _, model in self.models.items(): models += [model.to_dict()] return models class ServerModel: def __init__(self, opt, model_id, tokenizer_opt=None, load=False, timeout=-1, on_timeout="to_cpu", model_root="./"): """ Args: opt: (dict) options for the Translator model_id: (int) model id tokenizer_opt: (dict) options for the tokenizer or None load: (bool) whether to load the model during __init__ timeout: (int) seconds before running `do_timeout` Negative values means no timeout on_timeout: (str) in ["to_cpu", "unload"] set what to do on timeout (see function `do_timeout`) model_root: (str) path to the model directory it must contain de model and tokenizer file """ self.model_root = model_root self.opt = self.parse_opt(opt) if self.opt.n_best > 1: raise ValueError("Values of n_best > 1 are not supported") self.model_id = model_id self.tokenizer_opt = tokenizer_opt self.timeout = timeout self.on_timeout = on_timeout self.unload_timer = None self.user_opt = opt self.tokenizer = None if load: self.load() def parse_opt(self, opt): """Parse the option set passed by the user using `onmt.opts` Args: opt: (dict) options passed by the user Returns: opt: (Namespace) full set of options for the Translator """ prec_argv = sys.argv sys.argv = sys.argv[:1] parser = argparse.ArgumentParser() onmt.opts.translate_opts(parser) opt['model'] = os.path.join(self.model_root, opt['model']) opt['src'] = "dummy_src" for (k, v) in opt.items(): sys.argv += ['-%s' % k, str(v)] opt = parser.parse_args() opt.cuda = opt.gpu > -1 sys.argv = prec_argv return opt @property def loaded(self): return hasattr(self, 'translator') def load(self): timer = Timer() print("Loading model %d" % self.model_id) timer.start() 
try: self.translator = build_translator(self.opt, report_score=False, out_file=open(os.devnull, "w")) except RuntimeError as e: raise ServerModelError("Runtime Error: %s" % str(e)) timer.tick("model_loading") if self.tokenizer_opt is not None: print("Loading tokenizer") mandatory = ["type", "model"] for m in mandatory: if m not in self.tokenizer_opt: raise ValueError("Missing mandatory tokenizer option '%s'" % m) if self.tokenizer_opt['type'] == 'sentencepiece': import sentencepiece as spm sp = spm.SentencePieceProcessor() model_path = os.path.join(self.model_root, self.tokenizer_opt['model']) sp.Load(model_path) self.tokenizer = sp else: raise ValueError("Invalid value for tokenizer type") self.load_time = timer.tick() self.reset_unload_timer() def run(self, inputs, intervention=None): """Translate `inputs` using this model Args: inputs: [{"src": "..."},{"src": ...}] Returns: result: (list) translations times: (dict) containing times """ timer = Timer() print("\nRunning translation using %d" % self.model_id) timer.start() if not self.loaded: self.load() timer.tick(name="load") elif self.opt.cuda: self.to_gpu() timer.tick(name="to_gpu") texts = [] whitespace_segments = {} subsegment = {} sscount = 0 sslength = [] for (i, inp) in enumerate(inputs): src = inp['src'] lines = src.split("\n") subsegment[i] = slice(sscount, sscount + len(lines)) for line in lines: tok = self.maybe_tokenize(line) if len(''.join(line.split())) == 0: whitespace_segments[sscount] = line else: texts += [tok] sslength += [len(tok.split())] sscount += 1 timer.tick(name="writing") scores = [] predictions = [] if sscount > 0: try: scores, predictions = self.translator.translate( src_data_iter=texts, batch_size=self.opt.batch_size, intervention=intervention) except RuntimeError as e: raise ServerModelError("Runtime Error: %s" % str(e)) timer.tick(name="translation") print("""Using model #%d\t%d inputs (%d subsegment) \ttranslation time: %f""" % (self.model_id, len(subsegment), sscount, 
timer.times['translation'])) self.reset_unload_timer() # NOTE: translator returns lists of `n_best` list # we can ignore that (i.e. flatten lists) only because # we restrict `n_best=1` def flatten_list(_list): return sum(_list, []) results = flatten_list(predictions) scores = [score_tensor.item() for score_tensor in flatten_list(scores)] print("Translation Results: ", len(results)) if len(whitespace_segments) > 0: print("Whitespace segments: %d" % len(whitespace_segments)) for k in sorted(whitespace_segments.keys()): results.insert(k, whitespace_segments[k]) scores.insert(k, 0.0) results = ['\n'.join([self.maybe_detokenize(_) for _ in results[subsegment[i]]]) for i in sorted(subsegment.keys())] avg_scores = [sum([s * l for s, l in zip(scores[sub], sslength[sub])]) / sum(sslength[sub]) if sum(sslength[sub]) != 0 else 0.0 for k, sub in sorted(subsegment.items(), key=lambda x: x[0])] return results, avg_scores, self.opt.n_best, timer.times def do_timeout(self): """Timeout function that free GPU memory by moving the model to CPU or unloading it; depending on `self.on_timemout` value """ if self.on_timeout == "unload": print("Timeout: unloading model %d" % self.model_id) self.unload() if self.on_timeout == "to_cpu": print("Timeout: sending model %d to CPU" % self.model_id) self.to_cpu() def unload(self): print("Unloading model %d" % self.model_id) del self.translator if self.opt.cuda: torch.cuda.empty_cache() self.unload_timer = None def reset_unload_timer(self): if self.timeout < 0: return if self.unload_timer is not None: self.unload_timer.cancel() self.unload_timer = threading.Timer(self.timeout, self.do_timeout) self.unload_timer.start() def to_dict(self): hide_opt = ["model", "src"] d = {"model_id": self.model_id, "opt": {k: self.user_opt[k] for k in self.user_opt.keys() if k not in hide_opt}, "model": self.user_opt["model"], "loaded": self.loaded, "timeout": self.timeout, } if self.tokenizer_opt is not None: d["tokenizer"] = self.tokenizer_opt return d def 
to_cpu(self): """Move the model to CPU and clear CUDA cache """ self.translator.model.cpu() if self.opt.cuda: torch.cuda.empty_cache() def to_gpu(self): """Move the model to GPU """ torch.cuda.set_device(self.opt.gpu) self.translator.model.cuda() def maybe_tokenize(self, sequence): """Tokenize the sequence (or not) Same args/returns as `tokenize` """ if self.tokenizer_opt is not None: return self.tokenize(sequence) return sequence def tokenize(self, sequence): """Tokenize a single sequence Args: sequence: (str) the sequence to tokenize Returns: tok: (str) the tokenized sequence """ if self.tokenizer is None: raise ValueError("No tokenizer loaded") if self.tokenizer_opt["type"] == "sentencepiece": tok = self.tokenizer.EncodeAsPieces(sequence) tok = " ".join(tok) return tok def maybe_detokenize(self, sequence): """De-tokenize the sequence (or not) Same args/returns as `tokenize` """ if self.tokenizer_opt is not None: return self.detokenize(sequence) return sequence def detokenize(self, sequence): """Detokenize a single sequence Same args/returns as `tokenize` """ if self.tokenizer is None: raise ValueError("No tokenizer loaded") if self.tokenizer_opt["type"] == "sentencepiece": detok = self.tokenizer.DecodePieces(sequence.split()) return detok
33.559441
78
0.538654
ace803bd10a4492ac4d65b579c6939e7ea873760
9,939
py
Python
src/python_src/rh_transitions/load_training_datum.py
stevenjj/icra2020locomanipulation
414085b68cc1b3b24f7b920b543bba9d95350c16
[ "MIT" ]
5
2020-01-06T11:43:18.000Z
2021-12-14T22:59:09.000Z
src/python_src/rh_transitions/load_training_datum.py
stevenjj/icra2020locomanipulation
414085b68cc1b3b24f7b920b543bba9d95350c16
[ "MIT" ]
null
null
null
src/python_src/rh_transitions/load_training_datum.py
stevenjj/icra2020locomanipulation
414085b68cc1b3b24f7b920b543bba9d95350c16
[ "MIT" ]
2
2020-09-03T16:08:34.000Z
2022-02-17T11:13:49.000Z
#!/usr/bin/env python # This will execute the python script in the current environment import yaml import sys import numpy as np # Ensure that we have sourced our virtual environment #import tensorflow as tf def convert_quat_to_3vec(quat): # Normalize and extract quaternion x, y, z, w = quat/np.linalg.norm(quat) # Define vector part and its norm qv = np.array([x,y,z]) n = np.linalg.norm(qv) eps = 1e-6 m_angle = 0.0 m_axis = np.array([0,0,0]) # Extract axis angle components if the norm is greater than epsilon if (n > eps): m_angle = 2.0*np.arctan2(n, w) if (w < 0): n = -n m_axis = qv/n return m_angle*m_axis class ContactTransitionData: def __init__(self, yaml_file_path=""): self.stance_origin_to_num = { "left_foot": 0, "right_foot":1 } self.manipulation_type_to_num = { "left_hand": 0, "right_hand":1, "both_hands":2 } # use stance origin data # use right hand data # use left hand data self.use_stance_origin_data = True self.use_right_hand_data = True self.use_left_hand_data = False self.path = "" self.stance_origin = "" self.stance_origin_num = 0 self.manipulation_type = "" self.manipulation_type_num = 0 self.result = 0 self.swing_foot_starting_position = np.array([0,0,0], dtype='f') #x, y, z self.swing_foot_starting_orientation_quat = np.array([0,0,0,0], dtype='f') #x,y,z,w self.swing_foot_starting_orientation_vec = np.array([0,0,0], dtype='f') #rx,ry,rz self.pelvis_starting_position = np.array([0,0,0], dtype='f') #x, y, z self.pelvis_starting_orientation_quat = np.array([0, 0, 0, 0], dtype='f') #x,y,z,w self.pelvis_starting_orientation_vec = np.array([0,0,0], dtype='f') #rx,ry,rz self.left_hand_starting_position = np.array([0,0,0], dtype='f') self.left_hand_starting_orientation_quat = np.array([0,0,0,0], dtype='f') self.left_hand_starting_orientation_vec = np.array([0,0,0], dtype='f') #rx,ry,rz self.right_hand_starting_position = np.array([0,0,0], dtype='f') self.right_hand_starting_orientation_quat = np.array([0,0,0,0], dtype='f') 
self.right_hand_starting_orientation_vec = np.array([0,0,0], dtype='f') #rx,ry,rz self.landing_foot_position = np.array([0,0,0], dtype='f') self.landing_foot_orientation_quat = np.array([0,0,0,0], dtype='f') self.landing_foot_orientation_vec = np.array([0,0,0], dtype='f') #rx,ry,rz self.set_x() self.set_y() if (yaml_file_path != ""): self.load_yaml_file(yaml_file_path) def enable_right_hand_data(self, bool_input): self.use_right_hand_data = bool_input def enable_left_hand_data(self, bool_input): self.use_left_hand_data = bool_input def enable_stance_origin_data(self, bool_input): self.use_stance_origin_data = bool_input def set_x(self): # sets the x vector for the training data # stance_origin, manipulation_type stance_origin_type = np.array([self.stance_origin_num]) manipulation_type = np.array([self.manipulation_type_num]) # swingfoot_xy = self.swing_foot_starting_position[:2] # swingfoot_theta = self.swing_foot_starting_orientation_vec[2:3] # pelvis_theta = self.pelvis_starting_orientation_vec[2:3] # landingfoot_xy = self.landing_foot_position[:2] # landingfoot_theta = self.landing_foot_orientation_vec[2:3] # self.x = np.concatenate( (swingfoot_xy, swingfoot_theta, # self.pelvis_starting_position, pelvis_theta, # landingfoot_xy, landingfoot_theta) ) # # Add right hand data if enabled # if self.use_right_hand_data: # self.x = np.concatenate( (self.x, self.right_hand_starting_position,self.right_hand_starting_orientation_vec) ) # # Add left hand data if enabled # if self.use_left_hand_data: # self.x = np.concatenate( (self.x, self.left_hand_starting_position,self.left_hand_starting_orientation_vec) ) # # Add origin data if enabled # if self.use_stance_origin_data: # self.x = np.concatenate( (self.x, stance_origin_type)) self.x = np.concatenate( (stance_origin_type,manipulation_type, self.swing_foot_starting_position, self.swing_foot_starting_orientation_vec, self.pelvis_starting_position, self.pelvis_starting_orientation_vec, self.landing_foot_position, 
self.landing_foot_orientation_vec, self.right_hand_starting_position,self.right_hand_starting_orientation_vec, self.left_hand_starting_position,self.left_hand_starting_orientation_vec) ) # self.x = np.concatenate( (stance_origin_type, manipulation_type, # swingfoot_xy, swingfoot_theta, # self.pelvis_starting_position, pelvis_theta, # landingfoot_xy, landingfoot_theta, # self.right_hand_starting_position,self.right_hand_starting_orientation_vec, # self.left_hand_starting_position,self.left_hand_starting_orientation_vec) ) def set_y(self): self.y = self.result def get_x(self): return self.x def get_y(self): return self.y def loadPosition(self, data_loaded, key, pos): pos[0] = data_loaded[key]["x"] pos[1] = data_loaded[key]["y"] pos[2] = data_loaded[key]["z"] def loadOrientation(self, data_loaded, key, ori): ori[0] = data_loaded[key]["x"] ori[1] = data_loaded[key]["y"] ori[2] = data_loaded[key]["z"] ori[3] = data_loaded[key]["w"] def load_yaml_file(self, yaml_file_path, stance_type = None, manipulation_type = None): with open(yaml_file_path, 'r') as stream: data_loaded = yaml.load(stream) self.path = yaml_file_path try: # Only load the requested stance type if ((stance_type != None) and (data_loaded["stance_origin"] != stance_type)): return False # Only load the requested manipulation type if ((manipulation_type != None) and (data_loaded["manipulation_type"] != manipulation_type)): return False except: print " Exception occurred in reading the yaml file", yaml_file_path return False self.loadData(data_loaded) return True def loadData(self, data_loaded): self.stance_origin = data_loaded["stance_origin"] self.manipulation_type = data_loaded["manipulation_type"] self.result = 1 if data_loaded["result"] == "success" else 0 self.stance_origin_num = self.stance_origin_to_num[self.stance_origin] self.manipulation_type_num = self.manipulation_type_to_num[self.manipulation_type] self.loadPosition(data_loaded, "swing_foot_starting_position", self.swing_foot_starting_position) 
self.loadOrientation(data_loaded, "swing_foot_starting_orientation", self.swing_foot_starting_orientation_quat) self.swing_foot_starting_orientation_vec = convert_quat_to_3vec(self.swing_foot_starting_orientation_quat) self.loadPosition(data_loaded, "pelvis_starting_position", self.pelvis_starting_position) self.loadOrientation(data_loaded, "pelvis_starting_orientation", self.pelvis_starting_orientation_quat) self.pelvis_starting_orientation_vec = convert_quat_to_3vec(self.pelvis_starting_orientation_quat) self.loadPosition(data_loaded, "left_hand_starting_position", self.left_hand_starting_position) self.loadOrientation(data_loaded, "left_hand_starting_orientation", self.left_hand_starting_orientation_quat) self.left_hand_starting_orientation_vec = convert_quat_to_3vec(self.left_hand_starting_orientation_quat) self.loadPosition(data_loaded, "right_hand_starting_position", self.right_hand_starting_position) self.loadOrientation(data_loaded, "right_hand_starting_orientation", self.right_hand_starting_orientation_quat) self.right_hand_starting_orientation_vec = convert_quat_to_3vec(self.right_hand_starting_orientation_quat) self.loadPosition(data_loaded, "landing_foot_position", self.landing_foot_position) self.loadOrientation(data_loaded, "landing_foot_orientation", self.landing_foot_orientation_quat) self.landing_foot_orientation_vec = convert_quat_to_3vec(self.landing_foot_orientation_quat) self.set_x() self.set_y() def printData(self): print "yaml file:", self.path print " " print " stance_origin:", self.stance_origin, ", num representation = ", self.stance_origin_num print " manipulation_type:" , self.manipulation_type, ", num representation = ", self.manipulation_type_num print " result:" , self.result print " " print " swing_foot_starting_position:" , self.swing_foot_starting_position print " swing_foot_starting_orientation_quat:" , self.swing_foot_starting_orientation_quat print " swing_foot_starting_orientation_vec:" , 
self.swing_foot_starting_orientation_vec print " " print " pelvis_starting_position:" , self.pelvis_starting_position print " pelvis_starting_orientation_quat:" , self.pelvis_starting_orientation_quat print " pelvis_starting_orientation_vec:" , self.pelvis_starting_orientation_vec print " " print " left_hand_starting_position:" , self.left_hand_starting_position print " left_hand_starting_orientation_quat:" , self.left_hand_starting_orientation_quat print " left_hand_starting_orientation_vec:" , self.left_hand_starting_orientation_vec print " " print " right_hand_starting_position:" , self.right_hand_starting_position print " right_hand_starting_orientation_quat:" , self.right_hand_starting_orientation_quat print " right_hand_starting_orientation_vec:" , self.right_hand_starting_orientation_vec print " " print " landing_foot_position:" , self.landing_foot_position print " landing_foot_orientation_quat:" , self.landing_foot_orientation_quat print " landing_foot_orientation_vec:" , self.landing_foot_orientation_vec def test_load_file(filepath): training_data = ContactTransitionData(filepath) training_data.printData() print "x = ", training_data.get_x() print "y = ", training_data.get_y() # training_data = ContactTransitionData() #training_data.load_yaml_file(filepath) if __name__ == "__main__": # print (sys.argv) for item in sys.argv: if '.yaml' in item: filepath = item print "loading yaml file:", item test_load_file(filepath) break
38.824219
116
0.761445
ace8046a0bb8ca94e16c84ce1240581698447637
2,539
py
Python
tensorflow/python/layers/utils_test.py
garston2/tensorflow
bbe056e5a0ab81b67fcb6053400812b3d5805fc7
[ "Apache-2.0" ]
22
2017-06-26T01:27:45.000Z
2021-06-23T10:00:31.000Z
tensorflow/python/layers/utils_test.py
asadziach/Honda-Pre-Collision-Assist
18adfb637a4672e450748b2a408b551d4f3b8500
[ "Apache-2.0" ]
7
2017-07-13T09:40:59.000Z
2019-04-08T22:46:51.000Z
tensorflow/python/layers/utils_test.py
asadziach/Honda-Pre-Collision-Assist
18adfb637a4672e450748b2a408b551d4f3b8500
[ "Apache-2.0" ]
11
2017-08-17T05:52:35.000Z
2021-06-19T04:39:45.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.core.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.layers import utils from tensorflow.python.platform import test class ConvUtilsTest(test.TestCase): def testConvertDataFormat(self): self.assertEqual(utils.convert_data_format('channels_first', 5), 'NCDHW') self.assertEqual(utils.convert_data_format('channels_first', 4), 'NCHW') self.assertEqual(utils.convert_data_format('channels_first', 3), 'NCW') self.assertEqual(utils.convert_data_format('channels_last', 4), 'NHWC') self.assertEqual(utils.convert_data_format('channels_last', 3), 'NWC') self.assertEqual(utils.convert_data_format('channels_last', 5), 'NDHWC') with self.assertRaises(ValueError): utils.convert_data_format('invalid', 2) def testNormalizeTuple(self): self.assertEqual(utils.normalize_tuple(2, n=3, name='strides'), (2, 2, 2)) self.assertEqual( utils.normalize_tuple((2, 1, 2), n=3, name='strides'), (2, 1, 2)) with self.assertRaises(ValueError): utils.normalize_tuple((2, 1), n=3, name='strides') with self.assertRaises(ValueError): utils.normalize_tuple(None, n=3, name='strides') def testNormalizeDataFormat(self): self.assertEqual( utils.normalize_data_format('Channels_Last'), 'channels_last') self.assertEqual( 
utils.normalize_data_format('CHANNELS_FIRST'), 'channels_first') with self.assertRaises(ValueError): utils.normalize_data_format('invalid') def testNormalizePadding(self): self.assertEqual(utils.normalize_padding('SAME'), 'same') self.assertEqual(utils.normalize_padding('VALID'), 'valid') with self.assertRaises(ValueError): utils.normalize_padding('invalid') if __name__ == '__main__': test.main()
37.338235
80
0.716424
ace805dd234d2639cc7cf3f94a6d8aa71c8ed843
9,957
py
Python
docx/oxml/__init__.py
Volker-ES/python-docx
9bea8c9cc4bdb17eff3230a9a98c115286e49896
[ "MIT" ]
null
null
null
docx/oxml/__init__.py
Volker-ES/python-docx
9bea8c9cc4bdb17eff3230a9a98c115286e49896
[ "MIT" ]
null
null
null
docx/oxml/__init__.py
Volker-ES/python-docx
9bea8c9cc4bdb17eff3230a9a98c115286e49896
[ "MIT" ]
null
null
null
# encoding: utf-8 """ Initializes oxml sub-package, including registering custom element classes corresponding to Open XML elements. """ from __future__ import absolute_import from lxml import etree from .ns import NamespacePrefixedTag, nsmap # configure XML parser element_class_lookup = etree.ElementNamespaceClassLookup() oxml_parser = etree.XMLParser(remove_blank_text=True, resolve_entities=False) oxml_parser.set_element_class_lookup(element_class_lookup) def parse_xml(xml): """ Return root lxml element obtained by parsing XML character string in *xml*, which can be either a Python 2.x string or unicode. The custom parser is used, so custom element classes are produced for elements in *xml* that have them. """ root_element = etree.fromstring(xml, oxml_parser) return root_element def register_element_cls(tag, cls): """ Register *cls* to be constructed when the oxml parser encounters an element with matching *tag*. *tag* is a string of the form ``nspfx:tagroot``, e.g. ``'w:document'``. """ nspfx, tagroot = tag.split(':') namespace = element_class_lookup.get_namespace(nsmap[nspfx]) namespace[tagroot] = cls def OxmlElement(nsptag_str, attrs=None, nsdecls=None): """ Return a 'loose' lxml element having the tag specified by *nsptag_str*. *nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'. The resulting element is an instance of the custom element class for this tag name if one is defined. A dictionary of attribute values may be provided as *attrs*; they are set if present. All namespaces defined in the dict *nsdecls* are declared in the element using the key as the prefix and the value as the namespace name. If *nsdecls* is not provided, a single namespace declaration is added based on the prefix on *nsptag_str*. 
""" nsptag = NamespacePrefixedTag(nsptag_str) if nsdecls is None: nsdecls = nsptag.nsmap return oxml_parser.makeelement( nsptag.clark_name, attrib=attrs, nsmap=nsdecls ) # =========================================================================== # custom element class mappings # =========================================================================== from .shared import CT_DecimalNumber, CT_OnOff, CT_String # noqa register_element_cls("w:evenAndOddHeaders", CT_OnOff) register_element_cls("w:titlePg", CT_OnOff) from docx.oxml.bookmark import CT_Bookmark, CT_MarkupRange # noqa register_element_cls("w:bookmarkEnd", CT_MarkupRange) register_element_cls("w:bookmarkStart", CT_Bookmark) from .coreprops import CT_CoreProperties # noqa register_element_cls('cp:coreProperties', CT_CoreProperties) from .document import CT_Body, CT_Document # noqa register_element_cls('w:body', CT_Body) register_element_cls('w:document', CT_Document) from docx.oxml.endnotes import CT_Endnotes # noqa register_element_cls('w:endnotes', CT_Endnotes) from docx.oxml.footnotes import CT_Footnotes # noqa register_element_cls('w:footnotes', CT_Footnotes) from .numbering import CT_Num, CT_Numbering, CT_NumLvl, CT_NumPr # noqa register_element_cls('w:abstractNumId', CT_DecimalNumber) register_element_cls('w:ilvl', CT_DecimalNumber) register_element_cls('w:lvlOverride', CT_NumLvl) register_element_cls('w:num', CT_Num) register_element_cls('w:numId', CT_DecimalNumber) register_element_cls('w:numPr', CT_NumPr) register_element_cls('w:numbering', CT_Numbering) register_element_cls('w:startOverride', CT_DecimalNumber) from docx.oxml.section import CT_PageMar # noqa from docx.oxml.section import CT_HdrFtr, CT_HdrFtrRef, CT_PageSz, CT_SectPr, CT_SectType from .section import ( # noqa CT_HdrFtr, CT_HdrFtrRef, CT_PageMar, CT_PageSz, CT_SectPr, CT_SectType, ) register_element_cls("w:footerReference", CT_HdrFtrRef) register_element_cls("w:ftr", CT_HdrFtr) register_element_cls("w:hdr", CT_HdrFtr) 
register_element_cls("w:headerReference", CT_HdrFtrRef) register_element_cls("w:pgMar", CT_PageMar) register_element_cls("w:pgSz", CT_PageSz) register_element_cls("w:sectPr", CT_SectPr) register_element_cls("w:type", CT_SectType) from .settings import CT_Settings # noqa register_element_cls("w:settings", CT_Settings) from docx.oxml.shape import ( # noqa CT_Blip, CT_BlipFillProperties, CT_GraphicalObject, CT_GraphicalObjectData, CT_Inline, CT_NonVisualDrawingProps, CT_Picture, CT_PictureNonVisual, CT_Point2D, CT_PositiveSize2D, CT_ShapeProperties, CT_Transform2D, ) register_element_cls('a:blip', CT_Blip) register_element_cls('a:ext', CT_PositiveSize2D) register_element_cls('a:graphic', CT_GraphicalObject) register_element_cls('a:graphicData', CT_GraphicalObjectData) register_element_cls('a:off', CT_Point2D) register_element_cls('a:xfrm', CT_Transform2D) register_element_cls('pic:blipFill', CT_BlipFillProperties) register_element_cls('pic:cNvPr', CT_NonVisualDrawingProps) register_element_cls('pic:nvPicPr', CT_PictureNonVisual) register_element_cls('pic:pic', CT_Picture) register_element_cls('pic:spPr', CT_ShapeProperties) register_element_cls('wp:docPr', CT_NonVisualDrawingProps) register_element_cls('wp:extent', CT_PositiveSize2D) register_element_cls('wp:inline', CT_Inline) from .styles import CT_LatentStyles, CT_LsdException, CT_Style, CT_Styles # noqa register_element_cls('w:basedOn', CT_String) register_element_cls('w:latentStyles', CT_LatentStyles) register_element_cls('w:locked', CT_OnOff) register_element_cls('w:lsdException', CT_LsdException) register_element_cls('w:name', CT_String) register_element_cls('w:next', CT_String) register_element_cls('w:qFormat', CT_OnOff) register_element_cls('w:semiHidden', CT_OnOff) register_element_cls('w:style', CT_Style) register_element_cls('w:styles', CT_Styles) register_element_cls('w:uiPriority', CT_DecimalNumber) register_element_cls('w:unhideWhenUsed', CT_OnOff) from .table import ( # noqa CT_Height, CT_Row, CT_Tbl, 
CT_TblGrid, CT_TblGridCol, CT_TblLayoutType, CT_TblPr, CT_TblWidth, CT_Tc, CT_TcPr, CT_TrPr, CT_VMerge, CT_VerticalJc, CT_Shd, CT_TcBorders, CT_Border, ) register_element_cls('w:bidiVisual', CT_OnOff) register_element_cls('w:gridCol', CT_TblGridCol) register_element_cls('w:gridSpan', CT_DecimalNumber) register_element_cls('w:tbl', CT_Tbl) register_element_cls('w:tblGrid', CT_TblGrid) register_element_cls('w:tblLayout', CT_TblLayoutType) register_element_cls('w:tblPr', CT_TblPr) register_element_cls('w:tblStyle', CT_String) register_element_cls('w:tc', CT_Tc) register_element_cls('w:tcPr', CT_TcPr) register_element_cls('w:tcW', CT_TblWidth) register_element_cls('w:tr', CT_Row) register_element_cls('w:trHeight', CT_Height) register_element_cls('w:trPr', CT_TrPr) register_element_cls('w:vAlign', CT_VerticalJc) register_element_cls('w:vMerge', CT_VMerge) register_element_cls('w:shd', CT_Shd) register_element_cls('w:tcBorders', CT_TcBorders) register_element_cls('w:top', CT_Border) register_element_cls('w:left', CT_Border) register_element_cls('w:bottom', CT_Border) register_element_cls('w:right', CT_Border) from .text.font import ( # noqa CT_Color, CT_Fonts, CT_Highlight, CT_HpsMeasure, CT_RPr, CT_Underline, CT_VerticalAlignRun, ) register_element_cls('w:b', CT_OnOff) register_element_cls('w:bCs', CT_OnOff) register_element_cls('w:caps', CT_OnOff) register_element_cls('w:color', CT_Color) register_element_cls('w:cs', CT_OnOff) register_element_cls('w:dstrike', CT_OnOff) register_element_cls('w:emboss', CT_OnOff) register_element_cls('w:highlight', CT_Highlight) register_element_cls('w:i', CT_OnOff) register_element_cls('w:iCs', CT_OnOff) register_element_cls('w:imprint', CT_OnOff) register_element_cls('w:noProof', CT_OnOff) register_element_cls('w:oMath', CT_OnOff) register_element_cls('w:outline', CT_OnOff) register_element_cls('w:rFonts', CT_Fonts) register_element_cls('w:rPr', CT_RPr) register_element_cls('w:rStyle', CT_String) register_element_cls('w:rtl', 
CT_OnOff) register_element_cls('w:shadow', CT_OnOff) register_element_cls('w:smallCaps', CT_OnOff) register_element_cls('w:snapToGrid', CT_OnOff) register_element_cls('w:specVanish', CT_OnOff) register_element_cls('w:strike', CT_OnOff) register_element_cls('w:sz', CT_HpsMeasure) register_element_cls('w:u', CT_Underline) register_element_cls('w:vanish', CT_OnOff) register_element_cls('w:vertAlign', CT_VerticalAlignRun) register_element_cls('w:webHidden', CT_OnOff) from .text.paragraph import CT_P # noqa register_element_cls('w:p', CT_P) from .text.parfmt import ( # noqa CT_Ind, CT_Jc, CT_PPr, CT_Spacing, CT_TabStop, CT_TabStops, ) register_element_cls('w:ind', CT_Ind) register_element_cls('w:jc', CT_Jc) register_element_cls('w:keepLines', CT_OnOff) register_element_cls('w:keepNext', CT_OnOff) register_element_cls('w:pageBreakBefore', CT_OnOff) register_element_cls('w:pPr', CT_PPr) register_element_cls('w:pStyle', CT_String) register_element_cls('w:spacing', CT_Spacing) register_element_cls('w:tab', CT_TabStop) register_element_cls('w:tabs', CT_TabStops) register_element_cls('w:widowControl', CT_OnOff) from .text.run import CT_Br, CT_R, CT_Text # noqa register_element_cls('w:br', CT_Br) register_element_cls('w:r', CT_R) register_element_cls('w:t', CT_Text) from docx.oxml.field import CT_SimpleField register_element_cls("w:fldSimple", CT_SimpleField)
36.339416
88
0.732349
ace80635216f618e81b63d58de9a589d8d6c21f7
4,090
py
Python
env/lib/python3.7/site-packages/colorhash.py
Ianis-Bordrez/Streamify
60194f56533ba8434de14496aa91b0e148c6ad8d
[ "MIT" ]
null
null
null
env/lib/python3.7/site-packages/colorhash.py
Ianis-Bordrez/Streamify
60194f56533ba8434de14496aa91b0e148c6ad8d
[ "MIT" ]
null
null
null
env/lib/python3.7/site-packages/colorhash.py
Ianis-Bordrez/Streamify
60194f56533ba8434de14496aa91b0e148c6ad8d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2016 Felix Krull <f_krull@gmx.de> # Released under the terms of the MIT license; see README.rst. """ Generate a color based on an object's hash value. Quick start: >>> from colorhash import ColorHash >>> c = ColorHash('Hello World') >>> c.hsl (131, 0.65, 0.5) >>> c.rgb (45, 210, 75) >>> c.hex '#2dd24b' """ from __future__ import division from binascii import crc32 from numbers import Number import sys PY2 = sys.version_info[0] <= 2 def crc32_hash(obj): """Generate a hash for ``obj``. This function first converts the object to a string and encodes it into UTF-8, then calculates and returns the CRC-32 checksum of the result. The hash is guaranteed to be as stable as the result of the object's ``__str__`` method. """ if PY2: bs = str(obj) else: bs = str(obj).encode('utf-8') return crc32(bs) & 0xffffffff def hsl2rgb(hsl): """Convert an HSL color value into RGB. >>> hsl2rgb((0, 1, 0.5)) (255, 0, 0) """ try: h, s, l = hsl except TypeError: raise ValueError(hsl) try: h /= 360 q = l * (1 + s) if l < 0.5 else l + s - l * s p = 2 * l - q except TypeError: raise ValueError(hsl) rgb = [] for c in (h + 1 / 3, h, h - 1 / 3): if c < 0: c += 1 elif c > 1: c -= 1 if c < 1 / 6: c = p + (q - p) * 6 * c elif c < 0.5: c = q elif c < 2 / 3: c = p + (q - p) * 6 * (2 / 3 - c) else: c = p rgb.append(round(c * 255)) return tuple(rgb) def rgb2hex(rgb): """Format an RGB color value into a hexadecimal color string. >>> rgb2hex((255, 0, 0)) '#ff0000' """ try: return '#%02x%02x%02x' % rgb except TypeError: raise ValueError(rgb) def color_hash(obj, hashfunc=crc32_hash, lightness=(0.35, 0.5, 0.65), saturation=(0.35, 0.5, 0.65), min_h=None, max_h=None): """Calculate the color for the given object. Args: obj: the value. hashfunc: the hash function to use. Must be a unary function returning an integer. Defaults to ``crc32_hash``. lightness: a range of values, one of which will be picked for the lightness component of the result. Can also be a single number. 
saturation: a range of values, one of which will be picked for the saturation component of the result. Can also be a single number. min_h: if set, limit the hue component to this lower value. max_h: if set, limit the hue component to this upper value. Returns: A ``(H, S, L)`` tuple. """ if isinstance(lightness, Number): lightness = [lightness] if isinstance(saturation, Number): saturation = [saturation] if min_h is None and max_h is not None: min_h = 0 if min_h is not None and max_h is None: max_h = 360 hash = hashfunc(obj) h = (hash % 359) if min_h is not None and max_h is not None: h = (h / 1000) * (max_h - min_h) + min_h hash //= 360 s = saturation[hash % len(saturation)] hash //= len(saturation) l = lightness[hash % len(lightness)] return (h, s, l) class ColorHash: """Generate a color value and provide it in several format. This class takes the same arguments as the ``color_hash`` function. Attributes: hsl: HSL representation of the color value. rgb: RGB representation of the color value. hex: hex-formatted RGB color value. """ def __init__(self, *args, **kwargs): self.hsl = color_hash(*args, **kwargs) @property def rgb(self): return hsl2rgb(self.hsl) @property def hex(self): return rgb2hex(self.rgb)
26.050955
81
0.54132
ace8071e489a634d0935fcac2b1490b7d8d21c01
1,696
py
Python
tests/test_selection.py
rawg/levis
33cd6c915f51134f79f3586dc0e4a6072247b568
[ "MIT" ]
42
2016-06-29T21:13:02.000Z
2022-01-23T03:23:59.000Z
tests/test_selection.py
rawg/levis
33cd6c915f51134f79f3586dc0e4a6072247b568
[ "MIT" ]
null
null
null
tests/test_selection.py
rawg/levis
33cd6c915f51134f79f3586dc0e4a6072247b568
[ "MIT" ]
12
2016-07-18T20:46:55.000Z
2021-06-13T16:08:37.000Z
"""Unit tests for the base GeneticAlgorithm class. """ from __future__ import absolute_import import unittest from . import context from . import harness from levis.selection import * class ProportionateGAUT(ProportionateGA, harness.DummyBinaryGA): pass class ScalingProportionateGAUT(ScalingProportionateGA, harness.DummyBinaryGA): pass class TournamentGAUT(TournamentGA, harness.DummyBinaryGA): pass class ProportionateGATestCase(harness.BaseGATestCase): def gacls(self): return ProportionateGAUT class TournamentGATestCase(harness.BaseGATestCase): def gacls(self): return TournamentGAUT class ScalingProportionateGATestCase(harness.BaseGATestCase): def gacls(self): return ScalingProportionateGAUT class ElitistGAUT(ElitistGA, harness.DummyBinaryGA): pass class ElitistGATestCase(harness.BaseGATestCase): def gacls(self): return ElitistGAUT def test_init(self): ga = self.mkga({"population_size": 10, "elitism_pct": .5}) self.assertEqual(ga.num_elites, 5) def test_insert(self): ga = self.mkga({"population_size": 10, "elitism_pct": .4}) ga.fitness(50) self.assertEqual(ga.elites[0][1], 50) ga.fitness(100) self.assertEqual(ga.elites[0][1], 100) self.assertEqual(ga.elites[1][1], 50) ga.fitness(75) self.assertEqual(ga.elites[0][1], 100) self.assertEqual(ga.elites[1][1], 75) self.assertEqual(ga.elites[2][1], 50) ga.fitness(60) ga.fitness(80) self.assertEqual(len(ga.elites), 4) self.assertEqual(ga.elites[3][1], 60) if __name__ == '__main__': unittest.main()
22.315789
78
0.685731
ace80765cc9dff966cd5fcfb055e5a9d1d78a991
317
py
Python
main2.py
rampa3/MetaGenBrain
37f29ef629489a792e11ffe0f7a0d78b060b70a0
[ "MIT" ]
null
null
null
main2.py
rampa3/MetaGenBrain
37f29ef629489a792e11ffe0f7a0d78b060b70a0
[ "MIT" ]
null
null
null
main2.py
rampa3/MetaGenBrain
37f29ef629489a792e11ffe0f7a0d78b060b70a0
[ "MIT" ]
null
null
null
import capture2 import csv print("HI") with open('settings.csv') as csv_file: print("HI") csv_reader = csv.reader(csv_file, delimiter=',') next(csv_reader) lang_processing_settings = next(csv_reader) capture2.main(str(lang_processing_settings[3]), str(lang_processing_settings[4]))
24.384615
86
0.700315
ace8079fca68a74476f7db07d6257b4f9e85f7f5
145
py
Python
ARC_A/ARC099_A.py
ryosuke0825/atcoder_python
185cdbe7db44ecca1aaf357858d16d31ce515ddb
[ "MIT" ]
null
null
null
ARC_A/ARC099_A.py
ryosuke0825/atcoder_python
185cdbe7db44ecca1aaf357858d16d31ce515ddb
[ "MIT" ]
null
null
null
ARC_A/ARC099_A.py
ryosuke0825/atcoder_python
185cdbe7db44ecca1aaf357858d16d31ce515ddb
[ "MIT" ]
null
null
null
import math n, k = map(int, input().split()) A = list(map(int, input().split())) mi = min(A) cnt = A.count(mi) print(math.ceil((n-cnt)/(k-1)))
16.111111
35
0.586207
ace807a38009bebe2f4ae6441d5dda7fba7890a9
2,291
py
Python
pgzabbix/cmd.py
bilbolodz/PgZabbix
a7f184c522580d46e952cd83d7e5944f04f96585
[ "Apache-2.0" ]
3
2017-12-01T02:58:22.000Z
2020-07-10T07:12:29.000Z
pgzabbix/cmd.py
bilbolodz/PgZabbix
a7f184c522580d46e952cd83d7e5944f04f96585
[ "Apache-2.0" ]
8
2019-03-14T13:11:28.000Z
2019-06-07T09:07:30.000Z
pgzabbix/cmd.py
bilbolodz/PgZabbix
a7f184c522580d46e952cd83d7e5944f04f96585
[ "Apache-2.0" ]
2
2019-04-03T10:40:07.000Z
2019-05-31T12:33:26.000Z
# vim: set nobomb: import argparse import psycopg2 import pgzabbix try: import ConfigParser as configparser except ImportError: import configparser def parseConfig(inifile): config = configparser.SafeConfigParser() config.readfp(inifile) if not config.sections(): print("No sections in %s. Exiting" % inifile) exit(1) opt = {} for item in ('host', 'password', 'dbname', 'user'): opt[item] = config.get("postgres", item) return opt def get_connection(config): conn_string = "host={host} dbname={dbname} user={user} password={password}" conn = psycopg2.connect(conn_string.format(**config)) conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) return conn def commandline(): parser = argparse.ArgumentParser( prog="PgZabbix", description="Fiddle with Postgres for Zabbix" ) parser.add_argument('--config', nargs='?', type=argparse.FileType('r'), default='/etc/pgzabbix.ini' ) group = parser.add_mutually_exclusive_group(required=False) group.add_argument('--read', action='store_true', default=False) group.add_argument('--tables', action='store_true', default=False) group.add_argument('--discover', action='store_true', default=False) group.add_argument('--discover_tables', action='store_true', default=False) group.add_argument('--discover_db', action='store_true', default=False) group.add_argument('--discover_sr', action='store_true', default=False) args = parser.parse_args() return args def main(): args = commandline() config = parseConfig(args.config) conn = get_connection(config) cur = conn.cursor() if args.read: pgzabbix.all_generic(cur) pgzabbix.all_perdb(cur) pgzabbix.all_sr(cur) if args.tables: pgzabbix.tables_stat(config) if args.discover_db: pgzabbix.discover_db(cur) if args.discover_sr: pgzabbix.discover_sr(cur) if args.discover_tables: pgzabbix.discover_tables(config) if args.discover: pgzabbix.discover_all(config, cur) cur.close() conn.close() if __name__ == "__main__": main()
27.27381
79
0.652117
ace8088d80b3dbaad490bb72a965a805b776f602
5,316
py
Python
tests/contrib/io/azure/test_csv_blob.py
andromeida-maritime-solutions-pvt-ltd/kedro
005cec83ca52a8cdadc9cb428ec1c4cd86682da3
[ "Apache-2.0" ]
3
2019-06-06T15:36:10.000Z
2019-06-09T22:27:55.000Z
tests/contrib/io/azure/test_csv_blob.py
andromeida-maritime-solutions-pvt-ltd/kedro
005cec83ca52a8cdadc9cb428ec1c4cd86682da3
[ "Apache-2.0" ]
null
null
null
tests/contrib/io/azure/test_csv_blob.py
andromeida-maritime-solutions-pvt-ltd/kedro
005cec83ca52a8cdadc9cb428ec1c4cd86682da3
[ "Apache-2.0" ]
null
null
null
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND # NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS # BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # The QuantumBlack Visual Analytics Limited (“QuantumBlack”) name and logo # (either separately or in combination, “QuantumBlack Trademarks”) are # trademarks of QuantumBlack. The License does not grant you any right or # license to the QuantumBlack Trademarks. You may not use the QuantumBlack # Trademarks or any confusingly similar mark as a trademark for your product, # or use the QuantumBlack Trademarks in any other manner that might cause # confusion in the marketplace, including but not limited to in advertising, # on websites, or on software. # # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=unused-argument from unittest.mock import patch import pandas as pd import pytest from kedro.contrib.io.azure import CSVBlobDataSet from kedro.io import DataSetError TEST_FILE_NAME = "test.csv" TEST_CONTAINER_NAME = "test_bucket" TEST_CREDENTIALS = {"account_name": "ACCOUNT_NAME", "account_key": "ACCOUNT_KEY"} @pytest.fixture() def dummy_dataframe(): return pd.DataFrame({"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]}) @pytest.fixture def blob_csv_data_set(): def make_data_set(load_args=None, save_args=None): return CSVBlobDataSet( filepath=TEST_FILE_NAME, container_name=TEST_CONTAINER_NAME, blob_to_text_args={"to_extra": 42}, blob_from_text_args={"from_extra": 42}, credentials=TEST_CREDENTIALS, load_args=load_args, save_args=save_args, ) return make_data_set @patch("kedro.contrib.io.azure.csv_blob.BlockBlobService") def test_pass_credentials_load(blob_service, blob_csv_data_set): try: blob_csv_data_set().load() except DataSetError: # We don't care about it for this test, we just want to know that # azure service was called with the right parameters pass blob_service.assert_called_with( account_name="ACCOUNT_NAME", account_key="ACCOUNT_KEY" ) @patch("kedro.contrib.io.azure.csv_blob.BlockBlobService") def test_pass_credentials_save(blob_service, blob_csv_data_set, dummy_dataframe): blob_csv_data_set().save(dummy_dataframe) blob_service.assert_called_with( account_name="ACCOUNT_NAME", account_key="ACCOUNT_KEY" ) @patch("kedro.contrib.io.azure.csv_blob.BlockBlobService.get_blob_to_text") def test_load_blob_args(get_blob_mock, blob_csv_data_set): try: blob_csv_data_set().load() except DataSetError: pass get_blob_mock.assert_called_with( container_name=TEST_CONTAINER_NAME, blob_name=TEST_FILE_NAME, to_extra=42 ) class BlobMock: def __init__(self): self._content = "name,age\ntom,3\nbob,4" @property def content(self): return self._content def mock_load_func(): def mocked(*args, **kwargs): return BlobMock() return mocked @patch( 
"kedro.contrib.io.azure.csv_blob.BlockBlobService.get_blob_to_text", new_callable=mock_load_func, ) def test_load(get_blob_mock, blob_csv_data_set): result = blob_csv_data_set().load()[["name", "age"]] expected = pd.DataFrame({"name": ["tom", "bob"], "age": [3, 4]}) expected = expected[["name", "age"]] assert result.equals(expected) @patch("kedro.contrib.io.azure.csv_blob.BlockBlobService.create_blob_from_text") def test_save_blob_args(blob_from_mock, blob_csv_data_set, dummy_dataframe): blob_csv_data_set().save(dummy_dataframe) blob_from_mock.assert_called_with( container_name=TEST_CONTAINER_NAME, blob_name=TEST_FILE_NAME, text=dummy_dataframe.to_csv(index=False), from_extra=42, ) # pylint: disable=protected-access def test_load_extra_params(blob_csv_data_set): expected_keys = ["option"] data_set = blob_csv_data_set(load_args={"option": "value"}) for expected_key in expected_keys: assert expected_key in data_set._load_args.keys() # pylint: disable=protected-access def test_save_with_extra_params(blob_csv_data_set): expected_keys = ["index", "option"] data_set = blob_csv_data_set(save_args={"option": "value"}) for expected_key in expected_keys: assert expected_key in data_set._save_args.keys() def test_str_representation(blob_csv_data_set): data_set = blob_csv_data_set(save_args={"option": "value"}) assert "CSVBlobDataSet" in str(data_set) assert TEST_CREDENTIALS["account_name"] not in str(data_set) assert TEST_CREDENTIALS["account_key"] not in str(data_set)
33.859873
81
0.734951
ace80914e2eb41f037c9e6317d82f45dce7639d4
828
py
Python
simulations/validation/param_sweep_run.py
oscardavidtorres1994/cadCAD
229e2dac585eb6c1644cf277e3a7807883f10d13
[ "MIT" ]
3
2020-05-26T12:17:48.000Z
2020-06-25T12:03:37.000Z
simulations/validation/param_sweep_run.py
oscardavidtorres1994/cadCAD
229e2dac585eb6c1644cf277e3a7807883f10d13
[ "MIT" ]
4
2020-05-26T21:03:44.000Z
2020-06-30T12:13:15.000Z
simulations/validation/param_sweep_run.py
oscardavidtorres1994/cadCAD
229e2dac585eb6c1644cf277e3a7807883f10d13
[ "MIT" ]
1
2019-12-11T18:28:18.000Z
2019-12-11T18:28:18.000Z
import pandas as pd from tabulate import tabulate # The following imports NEED to be in the exact order from cadCAD.engine import ExecutionMode, ExecutionContext, Executor from simulations.validation import sweep_config from cadCAD import configs exec_mode = ExecutionMode() print("Simulation Execution: Concurrent Execution") multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc) run = Executor(exec_context=multi_proc_ctx, configs=configs) i = 0 config_names = ['sweep_config_A', 'sweep_config_B'] for raw_result, tensor_field in run.execute(): result = pd.DataFrame(raw_result) print() print("Tensor Field: " + config_names[i]) print(tabulate(tensor_field, headers='keys', tablefmt='psql')) print("Output:") print(tabulate(result, headers='keys', tablefmt='psql')) print() i += 1
33.12
67
0.759662
ace8091b187d81062f1b4d2e91ef481e2e9b2a80
212
py
Python
login/admin.py
yuxiaoYX/xiaoshuo
5652703521aa99774e8e0667c5e6b9f24a6d90ac
[ "MIT" ]
null
null
null
login/admin.py
yuxiaoYX/xiaoshuo
5652703521aa99774e8e0667c5e6b9f24a6d90ac
[ "MIT" ]
null
null
null
login/admin.py
yuxiaoYX/xiaoshuo
5652703521aa99774e8e0667c5e6b9f24a6d90ac
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import User # Register your models here. class userAdmin(admin.ModelAdmin): fields = ['username', 'password', 'bookrack'] admin.site.register(User, userAdmin)
21.2
49
0.75
ace80a973e13c004a2831d72d4abed0847db156b
464
py
Python
talesDB/failSend.py
Gabe397/Tales
0487843a85e8e70273978638b58e1f3f07d90e70
[ "MIT" ]
null
null
null
talesDB/failSend.py
Gabe397/Tales
0487843a85e8e70273978638b58e1f3f07d90e70
[ "MIT" ]
null
null
null
talesDB/failSend.py
Gabe397/Tales
0487843a85e8e70273978638b58e1f3f07d90e70
[ "MIT" ]
null
null
null
import pika import sqlCommands credentials = pika.PlainCredentials('gabe','gabe') parameters = pika.ConnectionParameters('192.168.1.160',5672,'/',credentials) connection = pika.BlockingConnection(parameters) channel = connection.channel() channel.queue_declare(queue='SQL') channel.basic_publish(exchange='', routing_key='SQL', body='Authentication Failed') print("Authentication Failed") connection.close()
18.56
76
0.700431
ace80b8e6bbfd61c42953d7265f7b94abcbd5eed
21,568
py
Python
pdm/project/core.py
linw1995/pdm
f2f67f17efd9cd8593ce06a4933cc2303890dcec
[ "MIT" ]
null
null
null
pdm/project/core.py
linw1995/pdm
f2f67f17efd9cd8593ce06a4933cc2303890dcec
[ "MIT" ]
null
null
null
pdm/project/core.py
linw1995/pdm
f2f67f17efd9cd8593ce06a4933cc2303890dcec
[ "MIT" ]
null
null
null
from __future__ import annotations import hashlib import json import os import re import shutil import sys from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union import tomlkit from pythonfinder import Finder from pythonfinder.environment import PYENV_INSTALLED, PYENV_ROOT from pythonfinder.models.python import PythonVersion from tomlkit.items import Comment, Whitespace from pdm import termui from pdm._types import Source from pdm.exceptions import NoPythonVersion, PdmUsageError, ProjectError from pdm.models import pip_shims from pdm.models.caches import CandidateInfoCache, HashCache from pdm.models.candidates import Candidate from pdm.models.environment import Environment, GlobalEnvironment from pdm.models.in_process import get_python_version from pdm.models.repositories import BaseRepository, PyPIRepository from pdm.models.requirements import Requirement, parse_requirement from pdm.models.specifiers import PySpecSet from pdm.project.config import Config from pdm.project.metadata import MutableMetadata as Metadata from pdm.utils import ( atomic_open_for_write, cached_property, cd, find_project_root, find_python_in_path, get_in_project_venv_python, is_venv_python, setdefault, ) if TYPE_CHECKING: from resolvelib.reporters import BaseReporter from pdm._vendor import halo from pdm.core import Core from pdm.resolver.providers import BaseProvider class Project: """Core project class""" PYPROJECT_FILENAME = "pyproject.toml" DEPENDENCIES_RE = re.compile(r"(?:(.+?)-)?dependencies") PYPROJECT_VERSION = "2" GLOBAL_PROJECT = Path.home() / ".pdm" / "global-project" core: Core @classmethod def create_global(cls, root_path: Optional[str] = None) -> "Project": if root_path is None: root_path = cls.GLOBAL_PROJECT.as_posix() project = cls(root_path) project.is_global = True project.init_global_project() return project def __init__(self, root_path: Optional[str] = None) -> None: self.is_global = False self._pyproject: 
Optional[Dict] = None self._lockfile: Optional[Dict] = None self._environment: Optional[Environment] = None self._python_executable: Optional[str] = None if root_path is None: root_path = find_project_root() if root_path is None and self.global_config["auto_global"]: self.root = self.GLOBAL_PROJECT self.is_global = True self.init_global_project() else: self.root = Path(root_path or "").absolute() def __repr__(self) -> str: return f"<Project '{self.root.as_posix()}'>" @property def pyproject_file(self) -> Path: return self.root / self.PYPROJECT_FILENAME @property def lockfile_file(self) -> Path: return self.root / "pdm.lock" @property def pyproject(self) -> Optional[dict]: if not self._pyproject and self.pyproject_file.exists(): data = tomlkit.parse(self.pyproject_file.read_text("utf-8")) self._pyproject = data return self._pyproject @pyproject.setter def pyproject(self, data: Dict[str, Any]) -> None: self._pyproject = data @property def tool_settings(self) -> dict: data = self.pyproject if not data: return {} return setdefault(setdefault(data, "tool", {}), "pdm", {}) @property def lockfile(self) -> dict: if not self._lockfile: if not self.lockfile_file.is_file(): raise ProjectError("Lock file does not exist.") data = tomlkit.parse(self.lockfile_file.read_text("utf-8")) self._lockfile = data return self._lockfile @lockfile.setter def lockfile(self, data: Dict[str, Any]) -> None: self._lockfile = data @property def config(self) -> Dict[str, Any]: """A read-only dict configuration, any modifications won't land in the file.""" result = dict(self.global_config) result.update(self.project_config) return result @property def scripts(self) -> Dict[str, Union[str, Dict[str, str]]]: return self.tool_settings.get("scripts") @cached_property def global_config(self) -> Config: """Read-and-writable configuration dict for global settings""" return Config(Path.home() / ".pdm" / "config.toml", is_global=True) @cached_property def project_config(self) -> Config: """Read-and-writable 
configuration dict for project settings""" return Config(self.root / ".pdm.toml") @property def python_executable(self) -> str: if not self._python_executable: self._python_executable = self.resolve_interpreter() return self._python_executable @python_executable.setter def python_executable(self, value: str) -> None: self._python_executable = value self.project_config["python.path"] = value def resolve_interpreter(self) -> str: """Get the Python interpreter path.""" config = self.config if self.project_config.get("python.path") and not os.getenv( "PDM_IGNORE_SAVED_PYTHON" ): saved_path = self.project_config["python.path"] if not os.path.isfile(saved_path): del self.project_config["python.path"] else: return saved_path if os.name == "nt": suffix = ".exe" scripts = "Scripts" else: suffix = "" scripts = "bin" virtual_env = os.getenv("VIRTUAL_ENV") if config["use_venv"] and virtual_env: return os.path.join(virtual_env, scripts, f"python{suffix}") for py_version in self.find_interpreters(): if self.python_requires.contains(str(py_version.version)): self.python_executable = py_version.executable return self.python_executable raise NoPythonVersion( "No Python that satisfies {} is found on the system.".format( self.python_requires ) ) def get_environment(self) -> Environment: """Get the environment selected by this project""" if self.is_global: env = GlobalEnvironment(self) # Rewrite global project's python requires to be # compatible with the exact version env.python_requires = PySpecSet( "==" + get_python_version(self.python_executable, True)[0] ) return env if self.config["use_venv"] and is_venv_python(self.python_executable): # Only recognize venv created by python -m venv and virtualenv>20 return GlobalEnvironment(self) return Environment(self) @property def environment(self) -> Environment: if not self._environment: self._environment = self.get_environment() return self._environment @property def python_requires(self) -> PySpecSet: return 
PySpecSet(self.meta.requires_python) def get_dependencies(self, section: Optional[str] = None) -> Dict[str, Requirement]: metadata = self.meta optional_dependencies = metadata.get("optional-dependencies", {}) dev_dependencies = self.tool_settings.get("dev-dependencies", {}) if section in (None, "default"): deps = metadata.get("dependencies", []) else: if section in optional_dependencies and section in dev_dependencies: self.core.ui.echo( f"The {section} section exists in both [optional-dependencies] " "and [dev-dependencies], the former is taken.", err=True, fg="yellow", ) if section in optional_dependencies: deps = optional_dependencies[section] elif section in dev_dependencies: deps = dev_dependencies[section] else: raise PdmUsageError(f"Non-exist section {section}") result = {} with cd(self.root): for line in deps: if line.startswith("-e "): req = parse_requirement(line[3:].strip(), True) else: req = parse_requirement(line) req.from_section = section or "default" # make editable packages behind normal ones to override correctly. 
result[req.identify()] = req return result @property def dependencies(self) -> Dict[str, Requirement]: return self.get_dependencies() @property def dev_dependencies(self) -> Dict[str, Requirement]: """All development dependencies""" dev_group = self.tool_settings.get("dev-dependencies", {}) if not dev_group: return {} result = {} with cd(self.root): for section, deps in dev_group.items(): for line in deps: if line.startswith("-e "): req = parse_requirement(line[3:].strip(), True) else: req = parse_requirement(line) req.from_section = section result[req.identify()] = req return result def iter_sections(self) -> Iterable[str]: result = {"default"} if self.meta.optional_dependencies: result.update(self.meta.optional_dependencies.keys()) if self.tool_settings.get("dev-dependencies"): result.update(self.tool_settings["dev-dependencies"].keys()) return result @property def all_dependencies(self) -> Dict[str, Dict[str, Requirement]]: return { section: self.get_dependencies(section) for section in self.iter_sections() } @property def allow_prereleases(self) -> Optional[bool]: return self.tool_settings.get("allow_prereleases") @property def sources(self) -> List[Source]: sources = list(self.tool_settings.get("source", [])) if not any(source.get("name") == "pypi" for source in sources): sources.insert( 0, { "url": self.config["pypi.url"], "verify_ssl": self.config["pypi.verify_ssl"], "name": "pypi", }, ) return sources def get_repository( self, cls: Optional[Type[BaseRepository]] = None ) -> BaseRepository: """Get the repository object""" if cls is None: cls = PyPIRepository sources = self.sources or [] return cls(sources, self.environment) def get_provider( self, strategy: str = "all", tracked_names: Optional[Iterable[str]] = None, ) -> BaseProvider: """Build a provider class for resolver. 
:param strategy: the resolve strategy :param tracked_names: the names of packages that needs to update :returns: The provider object """ from pdm.resolver.providers import ( BaseProvider, EagerUpdateProvider, ReusePinProvider, ) repository = self.get_repository(cls=self.core.repository_class) allow_prereleases = self.allow_prereleases requires_python = self.environment.python_requires if strategy == "all": provider = BaseProvider(repository, requires_python, allow_prereleases) else: provider_class = ( ReusePinProvider if strategy == "reuse" else EagerUpdateProvider ) preferred_pins = self.get_locked_candidates("__all__") provider = provider_class( preferred_pins, tracked_names or (), repository, requires_python, allow_prereleases, ) return provider def get_reporter( self, requirements: List[Requirement], tracked_names: Optional[Iterable[str]] = None, spinner: Optional[halo.Halo] = None, ) -> BaseReporter: """Return the reporter object to construct a resolver. :param requirements: requirements to resolve :param tracked_names: the names of packages that needs to update :param spinner: optional spinner object :returns: a reporter """ from pdm.resolver.reporters import SpinnerReporter return SpinnerReporter(spinner, requirements) def get_lock_metadata(self) -> Dict[str, Any]: content_hash = tomlkit.string("sha256:" + self.get_content_hash("sha256")) content_hash.trivia.trail = "\n\n" data = {"lock_version": self.PYPROJECT_VERSION, "content_hash": content_hash} return data def write_lockfile(self, toml_data: Dict, show_message: bool = True) -> None: toml_data["metadata"].update(self.get_lock_metadata()) with atomic_open_for_write(self.lockfile_file) as fp: fp.write(tomlkit.dumps(toml_data)) if show_message: self.core.ui.echo(f"Changes are written to {termui.green('pdm.lock')}.") self._lockfile = None def make_self_candidate(self, editable: bool = True) -> Candidate: req = parse_requirement(pip_shims.path_to_url(self.root.as_posix()), editable) req.name = self.meta.name 
return Candidate( req, self.environment, name=self.meta.name, version=self.meta.version ) def get_locked_candidates( self, section: Optional[str] = None ) -> Dict[str, Candidate]: if not self.lockfile_file.is_file(): return {} section = section or "default" result = {} for package in [dict(p) for p in self.lockfile.get("package", [])]: if section != "__all__" and section not in package["sections"]: continue version = package.get("version") if version: package["version"] = f"=={version}" package_name = package.pop("name") req = Requirement.from_req_dict(package_name, dict(package)) can = Candidate(req, self.environment, name=package_name, version=version) can.sections = package.get("sections", []) can.marker = req.marker can.hashes = { item["file"]: item["hash"] for item in self.lockfile["metadata"] .get("files", {}) .get(f"{req.key} {version}", []) } or None result[req.identify()] = can if section in ("default", "__all__") and self.meta.name and self.meta.version: result[self.meta.project_name.lower()] = self.make_self_candidate(True) return result def get_content_hash(self, algo: str = "md5") -> str: # Only calculate sources and dependencies sections. Otherwise lock file is # considered as unchanged. 
dump_data = { "sources": self.tool_settings.get("source", []), "dependencies": self.meta.get("dependencies", []), "dev-dependencies": self.tool_settings.get("dev-dependencies", {}), "optional-dependencies": self.meta.get("optional-dependencies", {}), "requires-python": self.meta.get("requires-python", ""), } pyproject_content = json.dumps(dump_data, sort_keys=True) hasher = hashlib.new(algo) hasher.update(pyproject_content.encode("utf-8")) return hasher.hexdigest() def is_lockfile_hash_match(self) -> bool: if not self.lockfile_file.exists(): return False hash_in_lockfile = str( self.lockfile.get("metadata", {}).get("content_hash", "") ) if not hash_in_lockfile: return False algo, hash_value = hash_in_lockfile.split(":") content_hash = self.get_content_hash(algo) return content_hash == hash_value def get_pyproject_dependencies(self, section: str, dev: bool = False) -> List[str]: """Get the dependencies array in the pyproject.toml""" if section == "default": return setdefault(self.meta, "dependencies", []) elif dev: return setdefault( setdefault(self.tool_settings, "dev-dependencies", {}), section, [] ) else: return setdefault( setdefault(self.meta, "optional-dependencies", {}), section, [] ) def add_dependencies( self, requirements: Dict[str, Requirement], to_section: str = "default", dev: bool = False, show_message: bool = True, ) -> None: deps = self.get_pyproject_dependencies(to_section, dev) for _, dep in requirements.items(): matched_index = next( (i for i, r in enumerate(deps) if dep.matches(r)), None ) if matched_index is None: deps.append(dep.as_line()) else: req = dep.as_line() deps[matched_index] = req # XXX: This dirty part is for tomlkit.Array.__setitem__() j = 0 for i in range(len(deps._value)): if isinstance(deps._value[i], (Comment, Whitespace)): continue if j == matched_index: deps._value[i] = tomlkit.item(req) break j += 1 deps.multiline(True) self.write_pyproject(show_message) def write_pyproject(self, show_message: bool = True) -> None: with 
atomic_open_for_write( self.pyproject_file.as_posix(), encoding="utf-8" ) as f: f.write(tomlkit.dumps(self.pyproject)) if show_message: self.core.ui.echo( f"Changes are written to {termui.green('pyproject.toml')}." ) self._pyproject = None @property def meta(self) -> Metadata: if not self.pyproject: self.pyproject = {"project": tomlkit.table()} return Metadata(self.pyproject_file, self.pyproject.get("project", {})) def init_global_project(self) -> None: if not self.is_global: return if not self.pyproject_file.exists(): self.root.mkdir(parents=True, exist_ok=True) self.pyproject_file.write_text( """\ [project] dependencies = ["pip", "setuptools", "wheel"] """ ) self._pyproject = None @property def cache_dir(self) -> Path: return Path(self.config.get("cache_dir")) def cache(self, name: str) -> Path: path = self.cache_dir / name path.mkdir(parents=True, exist_ok=True) return path def make_wheel_cache(self) -> pip_shims.WheelCache: return pip_shims.WheelCache( self.cache_dir.as_posix(), pip_shims.FormatControl(set(), set()) ) def make_candidate_info_cache(self) -> CandidateInfoCache: python_hash = hashlib.sha1( str(self.environment.python_requires).encode() ).hexdigest() file_name = f"package_meta_{python_hash}.json" return CandidateInfoCache(self.cache("metadata") / file_name) def make_hash_cache(self) -> HashCache: return HashCache(directory=self.cache("hashes").as_posix()) def find_interpreters( self, python_spec: Optional[str] = None ) -> Iterable[PythonVersion]: """Return an iterable of interpreter paths that matches the given specifier, which can be: 1. a version specifier like 3.7 2. an absolute path 3. a short name like python3 4. 
None that returns all possible interpreters """ config = self.config PythonVersion.__hash__ = lambda self: hash(self.executable) if not python_spec: if config.get("python.use_pyenv", True) and PYENV_INSTALLED: yield PythonVersion.from_path( os.path.join(PYENV_ROOT, "shims", "python") ) if config.get("use_venv"): python = get_in_project_venv_python(self.root) if python: yield PythonVersion.from_path(python) python = shutil.which("python") if python: yield PythonVersion.from_path(python) args = () else: if not all(c.isdigit() for c in python_spec.split(".")): if Path(python_spec).exists(): python = find_python_in_path(python_spec) if python: yield PythonVersion.from_path(str(python)) else: python = shutil.which(python_spec) if python: yield PythonVersion.from_path(python) return args = [int(v) for v in python_spec.split(".") if v != ""] finder = Finder() for entry in finder.find_all_python_versions(*args): yield entry.py_version if not python_spec: this_python = getattr(sys, "_base_executable", sys.executable) yield PythonVersion.from_path(this_python)
36.994854
88
0.598386
ace80bd3eb8d3cf1639be03c244b43aacbba2ec4
5,537
py
Python
paddlespeech/s2t/decoders/scorers/ctc.py
JiehangXie/PaddleSpeech
60090b49ec27437127ab62358026dd5bb95fccc7
[ "Apache-2.0" ]
1,540
2017-11-14T13:26:33.000Z
2021-11-09T14:05:08.000Z
paddlespeech/s2t/decoders/scorers/ctc.py
JiehangXie/PaddleSpeech
60090b49ec27437127ab62358026dd5bb95fccc7
[ "Apache-2.0" ]
599
2017-11-14T13:19:12.000Z
2021-11-09T01:58:26.000Z
paddlespeech/s2t/decoders/scorers/ctc.py
JiehangXie/PaddleSpeech
60090b49ec27437127ab62358026dd5bb95fccc7
[ "Apache-2.0" ]
449
2017-11-14T12:48:46.000Z
2021-11-06T09:34:33.000Z
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Modified from espnet(https://github.com/espnet/espnet) """ScorerInterface implementation for CTC.""" import numpy as np import paddle from .ctc_prefix_score import CTCPrefixScore from .ctc_prefix_score import CTCPrefixScorePD from .scorer_interface import BatchPartialScorerInterface class CTCPrefixScorer(BatchPartialScorerInterface): """Decoder interface wrapper for CTCPrefixScore.""" def __init__(self, ctc: paddle.nn.Layer, eos: int): """Initialize class. Args: ctc (paddle.nn.Layer): The CTC implementation. For example, :class:`paddlespeech.s2t.modules.ctc.CTC` eos (int): The end-of-sequence id. """ self.ctc = ctc self.eos = eos self.impl = None def init_state(self, x: paddle.Tensor): """Get an initial state for decoding. Args: x (paddle.Tensor): The encoded feature tensor Returns: initial state """ logp = self.ctc.log_softmax(x.unsqueeze(0)).squeeze(0).numpy() # TODO(karita): use CTCPrefixScorePD self.impl = CTCPrefixScore(logp, 0, self.eos, np) return 0, self.impl.initial_state() def select_state(self, state, i, new_id=None): """Select state with relative ids in the main beam search. 
Args: state: Decoder state for prefix tokens i (int): Index to select a state in the main beam search new_id (int): New label id to select a state if necessary Returns: state: pruned state """ if type(state) == tuple: if len(state) == 2: # for CTCPrefixScore sc, st = state return sc[i], st[i] else: # for CTCPrefixScorePD (need new_id > 0) r, log_psi, f_min, f_max, scoring_idmap = state s = log_psi[i, new_id].expand(log_psi.size(1)) if scoring_idmap is not None: return r[:, :, i, scoring_idmap[i, new_id]], s, f_min, f_max else: return r[:, :, i, new_id], s, f_min, f_max return None if state is None else state[i] def score_partial(self, y, ids, state, x): """Score new token. Args: y (paddle.Tensor): 1D prefix token next_tokens (paddle.Tensor): paddle.int64 next token to score state: decoder state for prefix tokens x (paddle.Tensor): 2D encoder feature that generates ys Returns: tuple[paddle.Tensor, Any]: Tuple of a score tensor for y that has a shape `(len(next_tokens),)` and next state for ys """ prev_score, state = state presub_score, new_st = self.impl(y.cpu(), ids.cpu(), state) tscore = paddle.to_tensor( presub_score - prev_score, place=x.place, dtype=x.dtype) return tscore, (presub_score, new_st) def batch_init_state(self, x: paddle.Tensor): """Get an initial state for decoding. Args: x (paddle.Tensor): The encoded feature tensor Returns: initial state """ logp = self.ctc.log_softmax(x.unsqueeze(0)) # assuming batch_size = 1 xlen = paddle.to_tensor([logp.size(1)]) self.impl = CTCPrefixScorePD(logp, xlen, 0, self.eos) return None def batch_score_partial(self, y, ids, state, x): """Score new token. 
Args: y (paddle.Tensor): 1D prefix token ids (paddle.Tensor): paddle.int64 next token to score state: decoder state for prefix tokens x (paddle.Tensor): 2D encoder feature that generates ys Returns: tuple[paddle.Tensor, Any]: Tuple of a score tensor for y that has a shape `(len(next_tokens),)` and next state for ys """ batch_state = ( (paddle.stack([s[0] for s in state], axis=2), paddle.stack([s[1] for s in state]), state[0][2], state[0][3], ) if state[0] is not None else None) return self.impl(y, batch_state, ids) def extend_prob(self, x: paddle.Tensor): """Extend probs for decoding. This extension is for streaming decoding as in Eq (14) in https://arxiv.org/abs/2006.14941 Args: x (paddle.Tensor): The encoded feature tensor """ logp = self.ctc.log_softmax(x.unsqueeze(0)) self.impl.extend_prob(logp) def extend_state(self, state): """Extend state for decoding. This extension is for streaming decoding as in Eq (14) in https://arxiv.org/abs/2006.14941 Args: state: The states of hyps Returns: exteded state """ new_state = [] for s in state: new_state.append(self.impl.extend_state(s)) return new_state
33.557576
84
0.601951
ace80ca454a8b00a542ce33d48d4d1f601f98534
1,062
py
Python
service/google_cloud_storage.py
moshebeeri/datap
9ff99bb435728cd69f2589e3ee858a06768ea85e
[ "Apache-2.0" ]
null
null
null
service/google_cloud_storage.py
moshebeeri/datap
9ff99bb435728cd69f2589e3ee858a06768ea85e
[ "Apache-2.0" ]
null
null
null
service/google_cloud_storage.py
moshebeeri/datap
9ff99bb435728cd69f2589e3ee858a06768ea85e
[ "Apache-2.0" ]
null
null
null
import json from data import Data from google.cloud import storage from .service import Service class GCloudStorage(Service): def __init__(self, region='US-EAST1', bucket='backup', path='/'): self.region = region self.bucket = bucket self.path = path def connect(self, connection={}): self.storage_client = storage.Client() self.bucket = self.storage_client.bucket(self.bucket) def read(self, job: Job) -> Data: data = Data() # get bucket data as blob blob = self.bucket.get_blob(self.path+job.from_time+'.jsonl') # convert to string jsonl_data = blob.download_as_string() for line in jsonl_data.splitlines(): doc_data = json.loads(line) data.add_doc(doc_data) return data def write(self, data: Data, job: Job) -> Data: docs = data.get_docs() content=[] for doc in docs: l = json.dumps(doc, sort_keys=True, default=str) content.append(l) blob = self.bucket.blob(self.path+job.from_time+'.jsonl') blob.upload_from_string('\n'.join(content)) return data
27.947368
67
0.673258
ace80d583e120f7b52d6397f695bdf6e1922892c
2,427
py
Python
setup.py
canavandl/colour
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
[ "BSD-3-Clause" ]
null
null
null
setup.py
canavandl/colour
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
[ "BSD-3-Clause" ]
null
null
null
setup.py
canavandl/colour
a453cd37b6135a9092d5ea5b2aafb8d19134bdff
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Pypi Setup ========== """ from __future__ import unicode_literals import sys from setuptools import setup from setuptools import find_packages __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers' __license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = 'colour-science@googlegroups.com' __status__ = 'Production' __all__ = ['SHORT_DESCRIPTION', 'LONG_DESCRIPTION', 'INSTALLATION_REQUIREMENTS', 'OPTIONAL_REQUIREMENTS', 'DOCS_REQUIREMENTS', 'TESTS_REQUIREMENTS'] SHORT_DESCRIPTION = 'Colour Science for Python' LONG_DESCRIPTION = open('README.rst').read() INSTALLATION_REQUIREMENTS = [ 'numpy>=1.8.1', 'matplotlib>=1.3.1'] if sys.version_info[:2] <= (2, 7): INSTALLATION_REQUIREMENTS += [ 'backports.functools_lru_cache>=1.0.1'] if sys.version_info[:2] <= (2, 6): INSTALLATION_REQUIREMENTS += [ 'ordereddict>=1.1', 'unittest2>=0.5.1'] OPTIONAL_REQUIREMENTS = ['scipy>=0.14.0'] DOCS_REQUIREMENTS = ['sphinx>=1.2.2', 'sphinxcontrib-napoleon>0.2.8', 'sphinx_bootstrap_theme>0.4.1'] TESTS_REQUIREMENTS = ['coverage>=3.7.1', 'flake8>=2.1.0', 'nose>=1.3.4'] setup(name='colour-science', version='0.3.2', author=__author__, author_email=__email__, include_package_data=True, packages=find_packages(), scripts=[], url='http://github.com/colour-science/colour', license='', description=SHORT_DESCRIPTION, long_description=LONG_DESCRIPTION, install_requires=INSTALLATION_REQUIREMENTS, extras_require={ 'docs': DOCS_REQUIREMENTS, 'optional': OPTIONAL_REQUIREMENTS, 'tests': TESTS_REQUIREMENTS}, classifiers=['Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Topic :: Utilities'])
29.597561
77
0.603626
ace80de216c794456f413989003bec4e4ab8eb30
1,122
py
Python
toai/data/utils.py
mindaugo1/toai
9f0cc109d1fa596d514a74da6b7e113039d09029
[ "MIT" ]
1
2019-11-22T13:12:11.000Z
2019-11-22T13:12:11.000Z
toai/data/utils.py
mindaugo1/toai
9f0cc109d1fa596d514a74da6b7e113039d09029
[ "MIT" ]
null
null
null
toai/data/utils.py
mindaugo1/toai
9f0cc109d1fa596d514a74da6b7e113039d09029
[ "MIT" ]
null
null
null
from typing import Tuple import pandas as pd from sklearn.model_selection import train_test_split def split_df( data: pd.DataFrame, test_size: float, target_col: str = None, random_state: int = 42 ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: stratify = data[target_col] if target_col else None train_data, test_data = train_test_split( data, test_size=test_size, stratify=stratify, random_state=random_state ) test_stratify = test_data[target_col] if target_col else None val_data, test_data = train_test_split( test_data, test_size=0.5, stratify=test_stratify, random_state=random_state ) for df in train_data, val_data, test_data: df.reset_index(drop=True, inplace=True) return train_data, val_data, test_data def balance_df_labels(df: pd.DataFrame, col_name: str) -> pd.DataFrame: max_value = df[col_name].value_counts().max() result = [ df[df[col_name] == value] for value, count in df[col_name].value_counts().items() for _ in range(max_value // count) ] return pd.concat(result).reset_index(drop=True)
36.193548
88
0.715686
ace80ea16900891e3937cc8d8e06f998b4661e92
5,168
py
Python
44 Hangman/python/hangman.py
aretche/basic-computer-games
ee35b11be7b29667ce7ac1ab851de0142c3c56cd
[ "Unlicense" ]
null
null
null
44 Hangman/python/hangman.py
aretche/basic-computer-games
ee35b11be7b29667ce7ac1ab851de0142c3c56cd
[ "Unlicense" ]
null
null
null
44 Hangman/python/hangman.py
aretche/basic-computer-games
ee35b11be7b29667ce7ac1ab851de0142c3c56cd
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3 # HANGMAN # # Converted from BASIC to Python by Trevor Hobson import random print(" " * 32 + "HANGMAN") print(" " * 15 + "CREATIVE COMPUTING MORRISTOWN, NEW JERSEY\n") words = ["GUM", "SIN", "FOR", "CRY", "LUG", "BYE", "FLY", "UGLY", "EACH", "FROM", "WORK", "TALK", "WITH", "SELF", "PIZZA", "THING", "FEIGN", "FIEND", "ELBOW", "FAULT", "DIRTY", "BUDGET", "SPIRIT", "QUAINT", "MAIDEN", "ESCORT", "PICKAX", "EXAMPLE", "TENSION", "QUININE", "KIDNEY", "REPLICA", "SLEEPER", "TRIANGLE", "KANGAROO", "MAHOGANY", "SERGEANT", "SEQUENCE", "MOUSTACHE", "DANGEROUS", "SCIENTIST", "DIFFERENT", "QUIESCENT", "MAGISTRATE", "ERRONEOUSLY", "LOUDSPEAKER", "PHYTOTOXIC", "MATRIMONIAL", "PARASYMPATHOMIMETIC", "THIGMOTROPISM"] def play_game(guessTarget): """Play the game""" guessWrong = 0 guessProgress = ["-"] * len(guessTarget) guessList = [] gallows = [([" "] * 12) for i in range(12)] for i in range(12): gallows[i][0] = "X" for i in range(7): gallows[0][i] = "X" gallows[1][6] = "X" guessCount = 0 while True: print("Here are the letters you used:") print(",".join(guessList) + "\n") print("".join(guessProgress) + "\n") guessLetter = "" guessWord = "" while guessLetter == "": guessLetter = input("What is your guess? ").upper()[0] if not guessLetter.isalpha(): guessLetter = "" print("Only letters are allowed!") elif guessLetter in guessList: guessLetter = "" print("You guessed that letter before!") guessList.append(guessLetter) guessCount = guessCount + 1 if guessLetter in guessTarget: indices = [i for i, letter in enumerate(guessTarget) if letter == guessLetter] for i in indices: guessProgress[i] = guessLetter if guessProgress == guessTarget: print("You found the word!") break else: print("\n" + "".join(guessProgress) + "\n") while guessWord == "": guessWord = input("What is your guess for the word? ").upper() if not guessWord.isalpha(): guessWord = "" print("Only words are allowed!") if guessWord == guessTarget: print("Right!! 
It took you", guessCount, "guesses!") break else: guessWrong = guessWrong + 1 print("Sorry, that letter isn't in the word.") if guessWrong == 1: print("First, we draw the head.") for i in range(5, 8): gallows[2][i] = "-" gallows[4][i] = "-" gallows[3][4] = "(" gallows[3][5] = "." gallows[3][7] = "." gallows[3][8] = ")" elif guessWrong == 2: print("Now we draw a body.") for i in range(5, 9): gallows[i][6] = "X" elif guessWrong == 3: print("Next we draw an arm.") for i in range(3, 7): gallows[i][i-1] = "\\" elif guessWrong == 4: print("This time it's the other arm.") for i in range(3, 7): gallows[i][13-i] = "/" elif guessWrong == 5: print("Now, let's draw the right leg.") gallows[9][5] = "/" gallows[10][4] = "/" elif guessWrong == 6: print("This time we draw the left leg.") gallows[9][7] = "\\" gallows[10][8] = "\\" elif guessWrong == 7: print("Now we put up a hand.") gallows[2][10] = "\\" elif guessWrong == 8: print("Next the other hand.") gallows[2][2] = "/" elif guessWrong == 9: print("Now we draw one foot.") gallows[11][9] = "\\" gallows[11][10] = "-" elif guessWrong == 10: print("Here's the other foot -- You're hung!!.") gallows[11][2] = "-" gallows[11][3] = "/" for i in range(12): print("".join(gallows[i])) print("\n") if guessWrong == 10: print("Sorry, you lose. The word was " + guessTarget) break def main(): """Main""" random.shuffle(words) wordCurrent = 0 wordCount = 49 keep_playing = True while keep_playing: play_game(words[wordCurrent]) wordCurrent = wordCurrent + 1 if wordCurrent >= wordCount: print("You did all the words!!") keep_playing = False else: keep_playing = input("Want another word? (yes or no) ").lower().startswith("y") print("It's been fun! Bye for now.") if __name__ == "__main__": main()
36.13986
91
0.467299
ace80f13e8e73c2f04472de86bf0a3a9da65eb6c
594
py
Python
telraam_data/tests/test_download.py
barentsen/telraam_data
84f4a5669a1e81e1c99594880137f13f51adb447
[ "MIT" ]
2
2020-04-09T23:05:06.000Z
2021-07-08T10:07:48.000Z
telraam_data/tests/test_download.py
barentsen/telraam_data
84f4a5669a1e81e1c99594880137f13f51adb447
[ "MIT" ]
1
2022-03-02T20:37:10.000Z
2022-03-03T06:38:31.000Z
telraam_data/tests/test_download.py
barentsen/telraam_data
84f4a5669a1e81e1c99594880137f13f51adb447
[ "MIT" ]
1
2021-09-21T11:51:14.000Z
2021-09-21T11:51:14.000Z
from .. import list_segments, list_segments_by_coordinates def test_list_segments(): # As of April 2020 there were more than 900 active segments. segments = list_segments() assert len(segments) > 900 def test_list_segments_by_coordinates(): # As of April 2020 there are more than 30 active segments in Schaarbeek segments = list_segments_by_coordinates(lat=50.867, lon=4.373, radius=2) assert len(segments) > 30 # 1003073114 should be one of them assert 1003073114 in segments # 1003063473 should not be one of them assert 1003063473 not in segments
33
76
0.744108
ace80f14b8bdac8dca7c3d87e08179ead17c5c02
109,275
py
Python
youtube_dl/YoutubeDL.py
snsjajnnz/hjj
2ca7ed41fed73cf37581b07d0c67d3bad8a6acc3
[ "Unlicense" ]
null
null
null
youtube_dl/YoutubeDL.py
snsjajnnz/hjj
2ca7ed41fed73cf37581b07d0c67d3bad8a6acc3
[ "Unlicense" ]
null
null
null
youtube_dl/YoutubeDL.py
snsjajnnz/hjj
2ca7ed41fed73cf37581b07d0c67d3bad8a6acc3
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, unicode_literals import collections import contextlib import copy import datetime import errno import fileinput import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback import random from string import ascii_letters from .compat import ( compat_basestring, compat_cookiejar, compat_get_terminal_size, compat_http_client, compat_kwargs, compat_numeric_types, compat_os_name, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, compat_urllib_request_DataHandler, ) from .utils import ( age_restricted, args_to_str, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, determine_protocol, DownloadError, encode_compat_str, encodeFilename, error_to_compat_str, expand_path, ExtractorError, format_bytes, formatSeconds, GeoRestrictedError, int_or_none, ISO3166Utils, locked_file, make_HTTPS_handler, MaxDownloadsReached, orderedSet, PagedList, parse_filesize, PerRequestProxyHandler, platform_name, PostProcessingError, preferredencoding, prepend_extension, register_socks_protocols, render_table, replace_extension, SameFileError, sanitize_filename, sanitize_path, sanitize_url, sanitized_Request, std_headers, subtitles_filename, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLCookieProcessor, YoutubeDLHandler, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER from .extractor.openload import PhantomJSwrapper from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM3u8PP, FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ if compat_os_name == 'nt': 
import ctypes class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. 
forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. 
False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. (Experimental) socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fridibi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items. postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. 
progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading", or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: (Experimental) Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download when used alone or a lower bound of a range for randomized sleep before each download (minimum possible number of seconds to sleep) when used along with max_sleep_interval. max_sleep_interval:Upper bound of a range for randomized sleep before each download (maximum possible number of seconds to sleep). Must only be used along with sleep_interval. Actual sleep time will be a random float from range [sleep_interval; max_sleep_interval]. 
listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental) geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header (experimental) The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv if True, otherwise use ffmpeg/avconv if False, otherwise use downloader suggested by extractor if None. The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args, hls_use_mpegts. The following options are used by the post processors: prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, otherwise prefer avconv. postprocessor_args: A list of additional command-line arguments for the postprocessor. The following options are used by the Youtube extractor: youtube_include_dash_manifest: If True (default), DASH manifests and related data will be downloaded and processed by extractor. You can reduce network I/O by disabling it if you don't care about DASH. 
""" _NUMERIC_FIELDS = set(( 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', 'timestamp', 'upload_year', 'upload_month', 'upload_day', 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', 'track_number', 'disc_number', 'release_year', 'playlist_index', )) params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = { # Default parameters 'nocheckcertificate': False, } self.params.update(params) self.cache = Cache(self) def check_deprecated(param, option, suggestion): if self.params.get(param) is not None: self.report_warning( '%s is deprecated. Use %s instead.' 
% (option, suggestion)) return True return False if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'): if self.params.get('geo_verification_proxy') is None: self.params['geo_verification_proxy'] = self.params['cn_verification_proxy'] check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits') check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"') check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"') if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == errno.ENOENT: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # Unicode filesystem API will throw errors (#1474, #13027) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. ' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. 
' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) register_socks_protocols() def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) if not isinstance(ie, type): self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractor_classes(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None): write_string(s, out=out, encoding=self.params.get('encoding')) def to_stdout(self, message, skip_eol=False, check_quiet=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file) def to_console_title(self, message): if not 
self.params.get('consoletitle', False): return if compat_os_name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save() def trouble(self, message=None, tb=None): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. 
''' if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '%dx?' 
% template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id' or k.endswith('_id'))) template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) # For fields playlist_index and autonumber convert all occurrences # of %(field)s to %(field)0Nd for backward compatibility field_size_compat_map = { 'playlist_index': len(str(template_dict['n_entries'])), 'autonumber': autonumber_size, } FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s' mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl) if mobj: outtmpl = re.sub( FIELD_SIZE_COMPAT_RE, r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')], outtmpl) # Missing numeric fields used together with integer presentation types # in format specification will break the argument substitution since # string 'NA' is returned for missing fields. We will patch output # template for missing fields to meet string presentation type. for numeric_field in self._NUMERIC_FIELDS: if numeric_field not in template_dict: # As of [1] format syntax is: # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting FORMAT_RE = r'''(?x) (?<!%) % \({0}\) # mapping key (?:[#0\-+ ]+)? # conversion flags (optional) (?:\d+)? # minimum field width (optional) (?:\.\d+)? # precision (optional) [hlL]? 
# length modifier (optional) [diouxXeEfFgGcrs%] # conversion type ''' outtmpl = re.sub( FORMAT_RE.format(numeric_field), r'%({0})s'.format(numeric_field), outtmpl) # expand_path translates '%%' into '%' and '$$' into '$' # correspondingly that is not what we want since we need to keep # '%%' intact for template dict substitution step. Working around # with boundary-alike separator hack. sep = ''.join([random.choice(ascii_letters) for _ in range(32)]) outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep)) # outtmpl should be expand_path'ed before template dict substitution # because meta fields may contain env variables we don't want to # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and # title "Hello $PATH", we don't want `$PATH` to be expanded. filename = expand_path(outtmpl).replace(sep, '') % template_dict # Temporary fix for #4787 # 'Treat' all problem characters by passing filename through preferredencoding # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = encodeFilename(filename, True).decode(preferredencoding()) return sanitize_path(filename) except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict, incomplete): """ Returns None iff the file should be downloaded """ video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): return '"' + title + '" title matched reject pattern "' + 
rejecttitle + '"' date = info_dict.get('upload_date') if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count') if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')): return 'Skipping "%s" because it is age restricted' % video_title if self.in_download_archive(info_dict): return '%s has already been recorded in archive' % video_title if not incomplete: match_filter = self.params.get('match_filter') if match_filter is not None: ret = match_filter(info_dict) if ret is not None: return ret return None @staticmethod def add_extra_info(info_dict, extra_info): '''Set the keys from extra_info in info dict if they are missing''' for key, value in extra_info.items(): info_dict.setdefault(key, value) def extract_info(self, url, download=True, ie_key=None, extra_info={}, process=True, force_generic_extractor=False): ''' Returns a list with a dictionary for each video we find. If 'download', also downloads the videos. 
extra_info is a dict containing the extra values to add to each result ''' if not ie_key and force_generic_extractor: ie_key = 'Generic' if ie_key: ies = [self.get_info_extractor(ie_key)] else: ies = self._ies for ie in ies: if not ie.suitable(url): continue ie = self.get_info_extractor(ie.ie_key()) if not ie.working(): self.report_warning('The program functionality for this site has been marked as broken, ' 'and will probably not work.') try: ie_result = ie.extract(url) if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) break if isinstance(ie_result, list): # Backwards compatibility: old IE result format ie_result = { '_type': 'compat_list', 'entries': ie_result, } self.add_default_extra_info(ie_result, ie, url) if process: return self.process_ie_result(ie_result, download, extra_info) else: return ie_result except GeoRestrictedError as e: msg = e.msg if e.countries: msg += '\nThis video is available in %s.' % ', '.join( map(ISO3166Utils.short2full, e.countries)) msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' self.report_error(msg) break except ExtractorError as e: # An error we somewhat expected self.report_error(compat_str(e), e.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc())) break else: raise else: self.report_error('no suitable InfoExtractor for URL %s' % url) def add_default_extra_info(self, ie_result, ie, url): self.add_extra_info(ie_result, { 'extractor': ie.IE_NAME, 'webpage_url': url, 'webpage_url_basename': url_basename(url), 'extractor_key': ie.ie_key(), }) def process_ie_result(self, ie_result, download=True, extra_info={}): """ Take the result of the ie(may be modified) and resolve all unresolved references (URLs, playlist items). It will also download the videos if 'download'. 
Returns the resolved ie_result. """ result_type = ie_result.get('_type', 'video') if result_type in ('url', 'url_transparent'): ie_result['url'] = sanitize_url(ie_result['url']) extract_flat = self.params.get('extract_flat', False) if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or extract_flat is True): if self.params.get('forcejson', False): self.to_stdout(json.dumps(ie_result)) return ie_result if result_type == 'video': self.add_extra_info(ie_result, extra_info) return self.process_video_result(ie_result, download=download) elif result_type == 'url': # We have to add extra_info to the results because it may be # contained in a playlist return self.extract_info(ie_result['url'], download, ie_key=ie_result.get('ie_key'), extra_info=extra_info) elif result_type == 'url_transparent': # Use the information from the embedding page info = self.extract_info( ie_result['url'], ie_key=ie_result.get('ie_key'), extra_info=extra_info, download=False, process=False) # extract_info may return None when ignoreerrors is enabled and # extraction failed with an error, don't crash and return early # in this case if not info: return info force_properties = dict( (k, v) for k, v in ie_result.items() if v is not None) for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'): if f in force_properties: del force_properties[f] new_result = info.copy() new_result.update(force_properties) # Extracted info may not be a video result (i.e. # info.get('_type', 'video') != video) but rather an url or # url_transparent. In such cases outer metadata (from ie_result) # should be propagated to inner one (info). For this to happen # _type of info should be overridden with url_transparent. This # fixes issue from https://github.com/rg3/youtube-dl/pull/11163. 
if new_result.get('_type') == 'url': new_result['_type'] = 'url_transparent' return self.process_ie_result( new_result, download=download, extra_info=extra_info) elif result_type in ('playlist', 'multi_video'): # We process each entry in the playlist playlist = ie_result.get('title') or ie_result.get('id') self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] playliststart = self.params.get('playliststart', 1) - 1 playlistend = self.params.get('playlistend') # For backwards compatibility, interpret -1 as whole list if playlistend == -1: playlistend = None playlistitems_str = self.params.get('playlist_items') playlistitems = None if playlistitems_str is not None: def iter_playlistitems(format): for string_segment in format.split(','): if '-' in string_segment: start, end = string_segment.split('-') for item in range(int(start), int(end) + 1): yield int(item) else: yield int(string_segment) playlistitems = orderedSet(iter_playlistitems(playlistitems_str)) ie_entries = ie_result['entries'] def make_playlistitems_entries(list_ie_entries): num_entries = len(list_ie_entries) return [ list_ie_entries[i - 1] for i in playlistitems if -num_entries <= i - 1 < num_entries] def report_download(num_entries): self.to_screen( '[%s] playlist %s: Downloading %d videos' % (ie_result['extractor'], playlist, num_entries)) if isinstance(ie_entries, list): n_all_entries = len(ie_entries) if playlistitems: entries = make_playlistitems_entries(ie_entries) else: entries = ie_entries[playliststart:playlistend] n_entries = len(entries) self.to_screen( '[%s] playlist %s: Collected %d video ids (downloading %d of them)' % (ie_result['extractor'], playlist, n_all_entries, n_entries)) elif isinstance(ie_entries, PagedList): if playlistitems: entries = [] for item in playlistitems: entries.extend(ie_entries.getslice( item - 1, item )) else: entries = ie_entries.getslice( playliststart, playlistend) n_entries = len(entries) report_download(n_entries) else: # 
iterable if playlistitems: entries = make_playlistitems_entries(list(itertools.islice( ie_entries, 0, max(playlistitems)))) else: entries = list(itertools.islice( ie_entries, playliststart, playlistend)) n_entries = len(entries) report_download(n_entries) if self.params.get('playlistreverse', False): entries = entries[::-1] if self.params.get('playlistrandom', False): random.shuffle(entries) x_forwarded_for = ie_result.get('__x_forwarded_for_ip') for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) # This __x_forwarded_for_ip thing is a bit ugly but requires # minimal changes if x_forwarded_for: entry['__x_forwarded_for_ip'] = x_forwarded_for extra = { 'n_entries': n_entries, 'playlist': playlist, 'playlist_id': ie_result.get('id'), 'playlist_title': ie_result.get('title'), 'playlist_uploader': ie_result.get('uploader'), 'playlist_uploader_id': ie_result.get('uploader_id'), 'playlist_index': i + playliststart, 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } reason = self._match_entry(entry, incomplete=True) if reason is not None: self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, download=download, extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results self.to_screen('[download] Finished downloading playlist: %s' % playlist) return ie_result elif result_type == 'compat_list': self.report_warning( 'Extractor %s returned a compat_list result. ' 'It needs to be updated.' 
% ie_result.get('extractor')) def _fixup(r): self.add_extra_info( r, { 'extractor': ie_result['extractor'], 'webpage_url': ie_result['webpage_url'], 'webpage_url_basename': url_basename(ie_result['webpage_url']), 'extractor_key': ie_result['extractor_key'], } ) return r ie_result['entries'] = [ self.process_ie_result(_fixup(r), download, extra_info) for r in ie_result['entries'] ] return ie_result else: raise Exception('Invalid result type: %s' % result_type) def _build_format_filter(self, filter_spec): " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, '<=': operator.le, '>': operator.gt, '>=': operator.ge, '=': operator.eq, '!=': operator.ne, } operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) $ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) except ValueError: comparison_value = parse_filesize(m.group('value')) if comparison_value is None: comparison_value = parse_filesize(m.group('value') + 'B') if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: STR_OPERATORS = { '=': operator.eq, '!=': operator.ne, '^=': lambda attr, value: attr.startswith(value), '$=': lambda attr, value: attr.endswith(value), '*=': lambda attr, value: value in attr, } str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? 
                \s*(?P<value>[a-zA-Z0-9._-]+)
                \s*$
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.search(filter_spec)
            if m:
                comparison_value = m.group('value')
                op = STR_OPERATORS[m.group('op')]

        if not m:
            # Neither the numeric grammar (tried above) nor the string
            # grammar matched the filter specification.
            raise ValueError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            # Apply the parsed comparison to a single format dict.
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                # Attribute missing from the format: keep the format only if
                # the spec carried the trailing '?' ("none inclusive") marker.
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

    def _default_format_spec(self, info_dict, download=True):
        """Return the default format spec for this download.

        Prefers 'bestvideo+bestaudio/best' when the two streams can be
        downloaded and merged locally; falls back to 'best/bestvideo+bestaudio'
        when merging is not possible (output to stdout, live stream, or no
        working ffmpeg/avconv merger).
        """

        def can_merge():
            # Merging separate audio/video requires a usable ffmpeg/avconv.
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        def prefer_best():
            # True when the single pre-muxed 'best' format should be tried
            # first instead of downloading two streams and merging them.
            if self.params.get('simulate', False):
                return False
            if not download:
                return False
            if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
                return True
            if info_dict.get('is_live'):
                return True
            if not can_merge():
                return True
            return False

        req_format_list = ['bestvideo+bestaudio', 'best']
        if prefer_best():
            req_format_list.reverse()
        return '/'.join(req_format_list)

    def build_format_selector(self, format_spec):
        # Compile a format specification string (e.g.
        # 'bestvideo[height<=720]+bestaudio/best') into a selector function
        # that operates on a context dict containing the candidate formats.
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        # Node kinds of the parsed selector tree.
        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        def _parse_filter(tokens):
            # Consume tokens up to the closing ']' and return them joined as
            # one filter string (parsed later by _build_format_filter).
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string,
last_start, last_end, last_line last_string = None yield type, string, start, end, line # everything inside brackets will be handled by _parse_filter for type, string, start, end, line in tokens: yield type, string, start, end, line if type == tokenize.OP and string == ']': break elif type == tokenize.OP and string in ALLOWED_OPS: if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line last_string = None yield type, string, start, end, line elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]: if not last_string: last_string = string last_start = start last_end = end else: last_string += string if last_string: yield tokenize.NAME, last_string, last_start, last_end, last_line def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): selectors = [] current_selector = None for type, string, start, _, _ in tokens: # ENCODING is only defined in python 3.x if type == getattr(tokenize, 'ENCODING', None): continue elif type in [tokenize.NAME, tokenize.NUMBER]: current_selector = FormatSelector(SINGLE, string, []) elif type == tokenize.OP: if string == ')': if not inside_group: # ')' will be handled by the parentheses group tokens.restore_last_token() break elif inside_merge and string in ['/', ',']: tokens.restore_last_token() break elif inside_choice and string == ',': tokens.restore_last_token() break elif string == ',': if not current_selector: raise syntax_error('"," must follow a format selector', start) selectors.append(current_selector) current_selector = None elif string == '/': if not current_selector: raise syntax_error('"/" must follow a format selector', start) first_choice = current_selector second_choice = _parse_format_selection(tokens, inside_choice=True) current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) elif string == '[': if not current_selector: current_selector = FormatSelector(SINGLE, 'best', []) format_filter = _parse_filter(tokens) 
current_selector.filters.append(format_filter) elif string == '(': if current_selector: raise syntax_error('Unexpected "("', start) group = _parse_format_selection(tokens, inside_group=True) current_selector = FormatSelector(GROUP, group, []) elif string == '+': video_selector = current_selector audio_selector = _parse_format_selection(tokens, inside_merge=True) if not video_selector or not audio_selector: raise syntax_error('"+" must be between two format selectors', start) current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) else: raise syntax_error('Operator not recognized: "{0}"'.format(string), start) elif type == tokenize.ENDMARKER: break if current_selector: selectors.append(current_selector) return selectors def _build_selector_function(selector): if isinstance(selector, list): fs = [_build_selector_function(s) for s in selector] def selector_function(ctx): for f in fs: for format in f(ctx): yield format return selector_function elif selector.type == GROUP: selector_function = _build_selector_function(selector.selector) elif selector.type == PICKFIRST: fs = [_build_selector_function(s) for s in selector.selector] def selector_function(ctx): for f in fs: picked_formats = list(f(ctx)) if picked_formats: return picked_formats return [] elif selector.type == SINGLE: format_spec = selector.selector def selector_function(ctx): formats = list(ctx['formats']) if not formats: return if format_spec == 'all': for f in formats: yield f elif format_spec in ['best', 'worst', None]: format_idx = 0 if format_spec == 'worst' else -1 audiovideo_formats = [ f for f in formats if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] if audiovideo_formats: yield audiovideo_formats[format_idx] # for extractors with incomplete formats (audio only (soundcloud) # or video only (imgur)) we will fallback to best/worst # {video,audio}-only format elif ctx['incomplete_formats']: yield formats[format_idx] elif format_spec == 'bestaudio': audio_formats = [ f 
for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[-1] elif format_spec == 'worstaudio': audio_formats = [ f for f in formats if f.get('vcodec') == 'none'] if audio_formats: yield audio_formats[0] elif format_spec == 'bestvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[-1] elif format_spec == 'worstvideo': video_formats = [ f for f in formats if f.get('acodec') == 'none'] if video_formats: yield video_formats[0] else: extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: filter_f = lambda f: f['format_id'] == format_spec matches = list(filter(filter_f, formats)) if matches: yield matches[-1] elif selector.type == MERGE: def _merge(formats_info): format_1, format_2 = [f['format_id'] for f in formats_info] # The first format must contain the video and the # second the audio if formats_info[0].get('vcodec') == 'none': self.report_error('The first format must ' 'contain the video, try using ' '"-f %s+%s"' % (format_2, format_1)) return # Formats must be opposite (video+audio) if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none': self.report_error( 'Both formats %s and %s are video-only, you must specify "-f video+audio"' % (format_1, format_2)) return output_ext = ( formats_info[0]['ext'] if self.params.get('merge_output_format') is None else self.params['merge_output_format']) return { 'requested_formats': formats_info, 'format': '%s+%s' % (formats_info[0].get('format'), formats_info[1].get('format')), 'format_id': '%s+%s' % (formats_info[0].get('format_id'), formats_info[1].get('format_id')), 'width': formats_info[0].get('width'), 'height': formats_info[0].get('height'), 'resolution': formats_info[0].get('resolution'), 'fps': formats_info[0].get('fps'), 'vcodec': formats_info[0].get('vcodec'), 'vbr': formats_info[0].get('vbr'), 
                        'stretched_ratio': formats_info[0].get('stretched_ratio'),
                        'acodec': formats_info[1].get('acodec'),
                        'abr': formats_info[1].get('abr'),
                        'ext': output_ext,
                    }
                video_selector, audio_selector = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    # Yield every (video, audio) pairing of the two child
                    # selectors, merged into a single combined format dict.
                    # deepcopy so each child sees an unmodified context.
                    for pair in itertools.product(
                            video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
                        yield _merge(pair)

            # Filters attached to this selector node ('[...]' clauses).
            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                # Filter the candidate formats first, then run the selector
                # on the reduced set.
                ctx_copy = copy.deepcopy(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        # Tokenize the spec string with Python's own tokenizer, then parse.
        stream = io.BytesIO(format_spec.encode('utf-8'))
        try:
            tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator(object):
            # Iterator over the token list with one-step pushback
            # (restore_last_token), needed by the recursive-descent parser.
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__  # Python 2 iterator protocol alias

            def restore_last_token(self):
                # Push the last token back so it is returned again.
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

    def _calc_headers(self, info_dict):
        """Build the HTTP headers for this download: global std_headers,
        overridden by the extractor's 'http_headers', plus Cookie and
        X-Forwarded-For when available."""
        res = std_headers.copy()

        add_headers = info_dict.get('http_headers')
        if add_headers:
            res.update(add_headers)

        cookies = self._calc_cookies(info_dict)
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, info_dict):
        """Return the Cookie header value the cookiejar would send for the
        video URL (or None when no cookies match)."""
        pr = sanitized_Request(info_dict['url'])
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')

    def process_video_result(self, info_dict, download=True):
        """Process a single (non-playlist) extractor result: validate and
        sanitize its metadata, handle thumbnails/subtitles, and select the
        formats to download."""
        assert info_dict.get('_type', 'video') ==
'video' if 'id' not in info_dict: raise ExtractorError('Missing "id" field in extractor result') if 'title' not in info_dict: raise ExtractorError('Missing "title" field in extractor result') def report_force_conversion(field, field_not, conversion): self.report_warning( '"%s" field is not %s - forcing %s conversion, there is an error in extractor' % (field, field_not, conversion)) def sanitize_string_field(info, string_field): field = info.get(string_field) if field is None or isinstance(field, compat_str): return report_force_conversion(string_field, 'a string', 'string') info[string_field] = compat_str(field) def sanitize_numeric_fields(info): for numeric_field in self._NUMERIC_FIELDS: field = info.get(numeric_field) if field is None or isinstance(field, compat_numeric_types): continue report_force_conversion(numeric_field, 'numeric', 'int') info[numeric_field] = int_or_none(field) sanitize_string_field(info_dict, 'id') sanitize_numeric_fields(info_dict) if 'playlist' not in info_dict: # It isn't part of a playlist info_dict['playlist'] = None info_dict['playlist_index'] = None thumbnails = info_dict.get('thumbnails') if thumbnails is None: thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}] if thumbnails: thumbnails.sort(key=lambda t: ( t.get('preference') if t.get('preference') is not None else -1, t.get('width') if t.get('width') is not None else -1, t.get('height') if t.get('height') is not None else -1, t.get('id') if t.get('id') is not None else '', t.get('url'))) for i, t in enumerate(thumbnails): t['url'] = sanitize_url(t['url']) if t.get('width') and t.get('height'): t['resolution'] = '%dx%d' % (t['width'], t['height']) if t.get('id') is None: t['id'] = '%d' % i if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) return thumbnail = info_dict.get('thumbnail') if thumbnail: info_dict['thumbnail'] = sanitize_url(thumbnail) elif thumbnails: info_dict['thumbnail'] = 
thumbnails[-1]['url'] if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: # Working around out-of-range timestamp values (e.g. negative ones on Windows, # see http://bugs.python.org/issue1646728) try: upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp']) info_dict['upload_date'] = upload_date.strftime('%Y%m%d') except (ValueError, OverflowError, OSError): pass # Auto generate title fields corresponding to the *_number fields when missing # in order to always have clean titles. This is very common for TV series. for field in ('chapter', 'season', 'episode'): if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) subtitles = info_dict.get('subtitles') if subtitles: for _, subtitle in subtitles.items(): for subtitle_format in subtitle: if subtitle_format.get('url'): subtitle_format['url'] = sanitize_url(subtitle_format['url']) if subtitle_format.get('ext') is None: subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower() if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') self.list_subtitles(info_dict['id'], subtitles, 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], subtitles, info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded if info_dict.get('formats') is None: # There's only one format available formats = [info_dict] else: formats = info_dict['formats'] if not formats: raise ExtractorError('No video formats found!') def is_wellformed(f): url = f.get('url') if not url: self.report_warning( '"url" field is missing or empty - skipping format, ' 'there is an error in extractor') return False if 
isinstance(url, bytes): sanitize_string_field(f, 'url') return True # Filter out malformed formats for better extraction robustness formats = list(filter(is_wellformed, formats)) formats_dict = {} # We check that all the formats have the format and format_id fields for i, format in enumerate(formats): sanitize_string_field(format, 'format_id') sanitize_numeric_fields(format) format['url'] = sanitize_url(format['url']) if not format.get('format_id'): format['format_id'] = compat_str(i) else: # Sanitize format_id from characters used in format selector expression format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id']) format_id = format['format_id'] if format_id not in formats_dict: formats_dict[format_id] = [] formats_dict[format_id].append(format) # Make sure all formats have unique format_id for format_id, ambiguous_formats in formats_dict.items(): if len(ambiguous_formats) > 1: for i, format in enumerate(ambiguous_formats): format['format_id'] = '%s-%d' % (format_id, i) for i, format in enumerate(formats): if format.get('format') is None: format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if format.get('ext') is None: format['ext'] = determine_ext(format['url']).lower() # Automatically determine protocol if missing (useful for format # selection purposes) if format.get('protocol') is None: format['protocol'] = determine_protocol(format) # Add HTTP headers, so that external programs can use them from the # json output full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) # Remove private housekeeping stuff if '__x_forwarded_for_ip' in info_dict: del info_dict['__x_forwarded_for_ip'] # TODO Central sorting goes here if formats[0] is not info_dict: # only set the 'formats' 
fields if the original info_dict list them # otherwise we end up with a circular reference, the first (and unique) # element in the 'formats' field in info_dict is info_dict itself, # which can't be exported to json info_dict['formats'] = formats if self.params.get('listformats'): self.list_formats(info_dict) return req_format = self.params.get('format') if req_format is None: req_format = self._default_format_spec(info_dict, download=download) if self.params.get('verbose'): self.to_stdout('[debug] Default format spec: %s' % req_format) format_selector = self.build_format_selector(req_format) # While in format selection we may need to have an access to the original # format set in order to calculate some metrics or do some processing. # For now we need to be able to guess whether original formats provided # by extractor are incomplete or not (i.e. whether extractor provides only # video-only or audio-only formats) for proper formats selection for # extractors with such incomplete formats (see # https://github.com/rg3/youtube-dl/pull/5556). # Since formats may be filtered during format selection and may not match # the original formats the results may be incorrect. Thus original formats # or pre-calculated metrics should be passed to format selection routines # as well. # We will pass a context object containing all necessary additional data # instead of just formats. # This fixes incorrect format selection issue (see # https://github.com/rg3/youtube-dl/issues/10083). 
incomplete_formats = ( # All formats are video-only or all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or # all formats are audio-only all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)) ctx = { 'formats': formats, 'incomplete_formats': incomplete_formats, } formats_to_download = list(format_selector(ctx)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) self.process_info(new_info) # We update the info dict with the best quality format (backwards compatibility) info_dict.update(formats_to_download[-1]) return info_dict def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" available_subs = {} if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info if (not self.params.get('writesubtitles') and not self.params.get('writeautomaticsub') or not available_subs): return None if self.params.get('allsubtitles', False): requested_langs = available_subs.keys() else: if self.params.get('subtitleslangs', False): requested_langs = self.params.get('subtitleslangs') elif 'en' in available_subs: requested_langs = ['en'] else: requested_langs = [list(available_subs.keys())[0]] formats_query = self.params.get('subtitlesformat', 'best') formats_preference = formats_query.split('/') if formats_query else [] subs = {} for lang in requested_langs: formats = available_subs.get(lang) if formats is None: self.report_warning('%s subtitles not available for %s' % 
(lang, video_id)) continue for ext in formats_preference: if ext == 'best': f = formats[-1] break matches = list(filter(lambda f: f['ext'] == ext, formats)) if matches: f = matches[-1] break else: f = formats[-1] self.report_warning( 'No subtitle format found matching "%s" for language %s, ' 'using %s' % (formats_query, lang, f['ext'])) subs[lang] = f return subs def process_info(self, info_dict): """Process a single resolved IE result.""" assert info_dict.get('_type', 'video') == 'video' max_downloads = self.params.get('max_downloads') if max_downloads is not None: if self._num_downloads >= int(max_downloads): raise MaxDownloadsReached() info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: info_dict['title'] = info_dict['title'][:197] + '...' if 'format' not in info_dict: info_dict['format'] = info_dict['ext'] reason = self._match_entry(info_dict, incomplete=False) if reason is not None: self.to_screen('[download] ' + reason) return self._num_downloads += 1 info_dict['_filename'] = filename = self.prepare_filename(info_dict) # Forced printings if self.params.get('forcetitle', False): self.to_stdout(info_dict['fulltitle']) if self.params.get('forceid', False): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): if info_dict.get('requested_formats') is not None: for f in info_dict['requested_formats']: self.to_stdout(f['url'] + f.get('play_path', '')) else: # For RTMP URLs, also include the playpath self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: self.to_stdout(info_dict['description']) if self.params.get('forcefilename', False) and filename is not None: self.to_stdout(filename) if self.params.get('forceduration', False) and info_dict.get('duration') is not None: 
self.to_stdout(formatSeconds(info_dict['duration'])) if self.params.get('forceformat', False): self.to_stdout(info_dict['format']) if self.params.get('forcejson', False): self.to_stdout(json.dumps(info_dict)) # Do nothing else if in simulate mode if self.params.get('simulate', False): return if filename is None: return def ensure_dir_exists(path): try: dn = os.path.dirname(path) if dn and not os.path.exists(dn): os.makedirs(dn) return True except (OSError, IOError) as err: self.report_error('unable to create directory ' + error_to_compat_str(err)) return False if not ensure_dir_exists(sanitize_path(encodeFilename(filename))): return if self.params.get('writedescription', False): descfn = replace_extension(filename, 'description', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): self.to_screen('[info] Video description is already present') elif info_dict.get('description') is None: self.report_warning('There\'s no description to write.') else: try: self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): self.report_error('Cannot write description file ' + descfn) return if self.params.get('writeannotations', False): annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): self.to_screen('[info] Video annotations are already present') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') except (OSError, IOError): self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = 
any([self.params.get('writesubtitles', False), self.params.get('writeautomaticsub')]) if subtitles_are_requested and info_dict.get('requested_subtitles'): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE subtitles = info_dict['requested_subtitles'] ie = self.get_info_extractor(info_dict['extractor_key']) for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format)) else: self.to_screen('[info] Writing video subtitles to: ' + sub_filename) if sub_info.get('data') is not None: try: # Use newline='' to prevent conversion of newline characters # See https://github.com/rg3/youtube-dl/issues/10268 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile: subfile.write(sub_info['data']) except (OSError, IOError): self.report_error('Cannot write subtitles file ' + sub_filename) return else: try: sub_data = ie._request_webpage( sub_info['url'], info_dict['id'], note=False).read() with io.open(encodeFilename(sub_filename), 'wb') as subfile: subfile.write(sub_data) except (ExtractorError, IOError, OSError, ValueError) as err: self.report_warning('Unable to download subtitle for "%s": %s' % (sub_lang, error_to_compat_str(err))) continue if self.params.get('writeinfojson', False): infofn = replace_extension(filename, 'info.json', info_dict.get('ext')) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): self.to_screen('[info] Video description metadata is already present') else: self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(self.filter_requested_info(info_dict), infofn) except (OSError, IOError): 
self.report_error('Cannot write metadata to JSON file ' + infofn) return self._write_thumbnails(info_dict, filename) if not self.params.get('skip_download', False): try: def dl(name, info): fd = get_suitable_downloader(info, self.params)(self, self.params) for ph in self._progress_hooks: fd.add_progress_hook(ph) if self.params.get('verbose'): self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) return fd.download(name, info) if info_dict.get('requested_formats') is not None: downloaded = [] success = True merger = FFmpegMergerPP(self) if not merger.available: postprocessors = [] self.report_warning('You have requested multiple ' 'formats but ffmpeg or avconv are not installed.' ' The formats won\'t be merged.') else: postprocessors = [merger] def compatible_formats(formats): video, audio = formats # Check extension video_ext, audio_ext = audio.get('ext'), video.get('ext') if video_ext and audio_ext: COMPATIBLE_EXTS = ( ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'), ('webm') ) for exts in COMPATIBLE_EXTS: if video_ext in exts and audio_ext in exts: return True # TODO: Check acodec/vcodec return False filename_real_ext = os.path.splitext(filename)[1][1:] filename_wo_ext = ( os.path.splitext(filename)[0] if filename_real_ext == info_dict['ext'] else filename) requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') # Ensure filename always has a correct extension for successful merge filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' 'merged' % filename) else: for f in requested_formats: new_info = dict(info_dict) new_info.update(f) fname = prepend_extension( self.prepare_filename(new_info), 'f%s' % 
f['format_id'], new_info['ext']) if not ensure_dir_exists(fname): return downloaded.append(fname) partial_success = dl(fname, new_info) success = success and partial_success info_dict['__postprocessors'] = postprocessors info_dict['__files_to_merge'] = downloaded else: # Just a single file success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_error('unable to download video data: %s' % error_to_compat_str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success and filename != '-': # Fixup content fixup_policy = self.params.get('fixup') if fixup_policy is None: fixup_policy = 'detect_or_warn' INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.' stretched_ratio = info_dict.get('stretched_ratio') if stretched_ratio is not None and stretched_ratio != 1: if fixup_policy == 'warn': self.report_warning('%s: Non-uniform pixel ratio (%s)' % ( info_dict['id'], stretched_ratio)) elif fixup_policy == 'detect_or_warn': stretched_pp = FFmpegFixupStretchedPP(self) if stretched_pp.available: info_dict.setdefault('__postprocessors', []) info_dict['__postprocessors'].append(stretched_pp) else: self.report_warning( '%s: Non-uniform pixel ratio (%s). %s' % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE)) else: assert fixup_policy in ('ignore', 'never') if (info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash'): if fixup_policy == 'warn': self.report_warning( '%s: writing DASH m4a. ' 'Only some players support this container.' 
                                % info_dict['id'])
                    elif fixup_policy == 'detect_or_warn':
                        # Fix automatically when ffmpeg/avconv is available,
                        # otherwise fall back to a warning.
                        fixup_pp = FFmpegFixupM4aPP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: writing DASH m4a. '
                                'Only some players support this container. %s'
                                % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                if (info_dict.get('protocol') == 'm3u8_native' or
                        info_dict.get('protocol') == 'm3u8' and
                        self.params.get('hls_prefer_native')):
                    if fixup_policy == 'warn':
                        self.report_warning('%s: malformed AAC bitstream detected.' % (
                            info_dict['id']))
                    elif fixup_policy == 'detect_or_warn':
                        fixup_pp = FFmpegFixupM3u8PP(self)
                        if fixup_pp.available:
                            info_dict.setdefault('__postprocessors', [])
                            info_dict['__postprocessors'].append(fixup_pp)
                        else:
                            self.report_warning(
                                '%s: malformed AAC bitstream detected. %s'
                                % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                    else:
                        assert fixup_policy in ('ignore', 'never')

                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return

        # Record the finished download so it is skipped next time when a
        # download archive is configured.
        self.record_download_archive(info_dict)

    def download(self, url_list):
        """Download a given list of URLs."""
        outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
        # Multiple URLs with a fixed output template would overwrite the
        # same file (unless the template has a '%' placeholder, output goes
        # to stdout, or only one download is allowed anyway).
        if (len(url_list) > 1 and
                outtmpl != '-' and
                '%' not in outtmpl and
                self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            try:
                # It also downloads the videos
                res = self.extract_info(
                    url, force_generic_extractor=self.params.get('force_generic_extractor', False))
            except UnavailableVideoError:
                self.report_error('unable to download video')
            except MaxDownloadsReached:
                # Limit reached: stop processing the remaining URLs.
                self.to_screen('[info] Maximum number of downloaded files reached.')
                raise
            else:
                if self.params.get('dump_single_json', False):
                    self.to_stdout(json.dumps(res))

        return self._download_retcode

    def download_with_info_file(self, info_filename):
        """Download using metadata read from a previously written
        .info.json file instead of re-extracting it."""
        with contextlib.closing(fileinput.FileInput(
                [info_filename],
mode='r', openhook=fileinput.hook_encoded('utf-8'))) as f: # FileInput doesn't have a read method, we can't call json.load info = self.filter_requested_info(json.loads('\n'.join(f))) try: self.process_ie_result(info, download=True) except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise return self._download_retcode @staticmethod def filter_requested_info(info_dict): return dict( (k, v) for k, v in info_dict.items() if k not in ['requested_formats', 'requested_subtitles']) def post_process(self, filename, ie_info): """Run all the postprocessors on the given file.""" info = dict(ie_info) info['filepath'] = filename pps_chain = [] if ie_info.get('__postprocessors') is not None: pps_chain.extend(ie_info['__postprocessors']) pps_chain.extend(self._pps) for pp in pps_chain: files_to_delete = [] try: files_to_delete, info = pp.run(info) except PostProcessingError as e: self.report_error(e.msg) if files_to_delete and not self.params.get('keepvideo', False): for old_filename in files_to_delete: self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) try: os.remove(encodeFilename(old_filename)) except (IOError, OSError): self.report_warning('Unable to remove downloaded original file') def _make_archive_id(self, info_dict): # Future-proof against any change in case # and backwards compatibility with prior versions extractor = info_dict.get('extractor_key') if extractor is None: if 'id' in info_dict: extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return False vid_id = self._make_archive_id(info_dict) if vid_id is None: return False # Incomplete video information try: with 
locked_file(fn, 'r', encoding='utf-8') as archive_file: for line in archive_file: if line.strip() == vid_id: return True except IOError as ioe: if ioe.errno != errno.ENOENT: raise return False def record_download_archive(self, info_dict): fn = self.params.get('download_archive') if fn is None: return vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, default='unknown'): if format.get('vcodec') == 'none': return 'audio only' if format.get('resolution') is not None: return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: res = '%sx%s' % (format['width'], format['height']) else: res = '%sp' % format['height'] elif format.get('width') is not None: res = '%dx?' % format['width'] else: res = default return res def _format_note(self, fdict): res = '' if fdict.get('ext') in ['f4f', 'f4m']: res += '(unsupported) ' if fdict.get('language'): if res: res += ' ' res += '[%s] ' % fdict['language'] if fdict.get('format_note') is not None: res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: res += '%4dk ' % fdict['tbr'] if fdict.get('container') is not None: if res: res += ', ' res += '%s container' % fdict['container'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): if res: res += ', ' res += fdict['vcodec'] if fdict.get('vbr') is not None: res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: res += 'video@' if fdict.get('vbr') is not None: res += '%4dk' % fdict['vbr'] if fdict.get('fps') is not None: if res: res += ', ' res += '%sfps' % fdict['fps'] if fdict.get('acodec') is not None: if res: res += ', ' if fdict['acodec'] == 'none': res += 'video only' else: res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: res += ', ' res += 'audio' if fdict.get('abr') is not None: res += '@%3dk' % fdict['abr'] if 
fdict.get('asr') is not None: res += ' (%5dHz)' % fdict['asr'] if fdict.get('filesize') is not None: if res: res += ', ' res += format_bytes(fdict['filesize']) elif fdict.get('filesize_approx') is not None: if res: res += ', ' res += '~' + format_bytes(fdict['filesize_approx']) return res def list_formats(self, info_dict): formats = info_dict.get('formats', [info_dict]) table = [ [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)] for f in formats if f.get('preference') is None or f['preference'] >= -1000] if len(formats) > 1: table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)' header_line = ['format code', 'extension', 'resolution', 'note'] self.to_screen( '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(header_line, table))) def list_thumbnails(self, info_dict): thumbnails = info_dict.get('thumbnails') if not thumbnails: self.to_screen('[info] No thumbnails present for %s' % info_dict['id']) return self.to_screen( '[info] Thumbnails for %s:' % info_dict['id']) self.to_screen(render_table( ['ID', 'width', 'height', 'URL'], [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])) def list_subtitles(self, video_id, subtitles, name='subtitles'): if not subtitles: self.to_screen('%s has no %s' % (video_id, name)) return self.to_screen( 'Available %s for %s:' % (name, video_id)) self.to_screen(render_table( ['Language', 'formats'], [[lang, ', '.join(f['ext'] for f in reversed(formats))] for lang, formats in subtitles.items()])) def urlopen(self, req): """ Start an HTTP download """ if isinstance(req, compat_basestring): req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): if not self.params.get('verbose'): return if type('') is not compat_str: # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326) self.report_warning( 'Your Python is broken! 
Update to a newer and supported version') stdout_encoding = getattr( sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__) encoding_str = ( '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % ( locale.getpreferredencoding(), sys.getfilesystemencoding(), stdout_encoding, self.get_encoding())) write_string(encoding_str, encoding=None) self._write_string('[debug] youtube-dl version ' + __version__ + '\n') if _LAZY_LOADER: self._write_string('[debug] Lazy loading extractors enabled' + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(os.path.abspath(__file__))) out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): self._write_string('[debug] Git HEAD: ' + out + '\n') except Exception: try: sys.exc_clear() except Exception: pass self._write_string('[debug] Python version %s - %s\n' % ( platform.python_version(), platform_name())) exe_versions = FFmpegPostProcessor.get_versions(self) exe_versions['rtmpdump'] = rtmpdump_version() exe_versions['phantomjs'] = PhantomJSwrapper._version() exe_str = ', '.join( '%s %s' % (exe, v) for exe, v in sorted(exe_versions.items()) if v ) if not exe_str: exe_str = 'none' self._write_string('[debug] exe versions: %s\n' % exe_str) proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') if self.params.get('call_home', False): ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') self._write_string('[debug] Public IP address: %s\n' % ipaddr) latest_version = self.urlopen( 'https://yt-dl.org/latest/version').read().decode('utf-8') if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! ' 'See https://yt-dl.org/update if you need help updating.' 
% latest_version) def _setup_opener(self): timeout_val = self.params.get('socket_timeout') self._socket_timeout = 600 if timeout_val is None else float(timeout_val) opts_cookiefile = self.params.get('cookiefile') opts_proxy = self.params.get('proxy') if opts_cookiefile is None: self.cookiejar = compat_cookiejar.CookieJar() else: opts_cookiefile = expand_path(opts_cookiefile) self.cookiejar = compat_cookiejar.MozillaCookieJar( opts_cookiefile) if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() cookie_processor = YoutubeDLCookieProcessor(self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: proxies = compat_urllib_request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = PerRequestProxyHandler(proxies) debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) data_handler = compat_urllib_request_DataHandler() # When passing our own FileHandler instance, build_opener won't add the # default FileHandler and allows us to disable the file protocol, which # can be used for malicious purposes (see # https://github.com/rg3/youtube-dl/issues/8227) file_handler = compat_urllib_request.FileHandler() def file_open(*args, **kwargs): raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons') file_handler.file_open = file_open opener = compat_urllib_request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) 
opener.addheaders = [] self._opener = opener def encode(self, s): if isinstance(s, bytes): return s # Already encoded try: return s.encode(self.get_encoding()) except UnicodeEncodeError as err: err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.' raise def get_encoding(self): encoding = self.params.get('encoding') if encoding is None: encoding = preferredencoding() return encoding def _write_thumbnails(self, info_dict, filename): if self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') if thumbnails: thumbnails = [thumbnails[-1]] elif self.params.get('write_all_thumbnails', False): thumbnails = info_dict.get('thumbnails') else: return if not thumbnails: # No thumbnails present, so return immediately return for t in thumbnails: thumb_ext = determine_ext(t['url'], 'jpg') suffix = '_%s' % t['id'] if len(thumbnails) > 1 else '' thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else '' t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): self.to_screen('[%s] %s: Thumbnail %sis already present' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) else: self.to_screen('[%s] %s: Downloading thumbnail %s...' % (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) with open(encodeFilename(thumb_filename), 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % (t['url'], error_to_compat_str(err)))
46.068718
194
0.553063
ace80fc8c18278b1606d4a8c46f7a8d1ef0d1297
319
py
Python
tests/test_blog.py
Nancy-Muthinzi/ip6-Blog
18da561829cfdfe99a178b4afef0913311cc8a8b
[ "Unlicense" ]
null
null
null
tests/test_blog.py
Nancy-Muthinzi/ip6-Blog
18da561829cfdfe99a178b4afef0913311cc8a8b
[ "Unlicense" ]
null
null
null
tests/test_blog.py
Nancy-Muthinzi/ip6-Blog
18da561829cfdfe99a178b4afef0913311cc8a8b
[ "Unlicense" ]
null
null
null
import unittest from app.models import Blog class UserModelTest(unittest.TestCase): def setUp(self): ''' method to run before every test ''' self.new_blog = Blog(123,'My life in Moringa School') def test_instance(self): self.assertTrue(isinstance(self.new_blog,Blog))
22.785714
61
0.655172
ace8104d227c5323a25c01aec4405535dd89ed22
1,330
py
Python
mwlib/epub/utils/misc.py
pediapress/mwlib.epub
d0989cf34d58a0db95744c4c39578853eb5f40f1
[ "Unlicense" ]
2
2015-02-17T11:18:12.000Z
2020-06-09T14:48:01.000Z
mwlib/epub/utils/misc.py
pediapress/mwlib.epub
d0989cf34d58a0db95744c4c39578853eb5f40f1
[ "Unlicense" ]
null
null
null
mwlib/epub/utils/misc.py
pediapress/mwlib.epub
d0989cf34d58a0db95744c4c39578853eb5f40f1
[ "Unlicense" ]
null
null
null
#! /usr/bin/env python #! -*- coding:utf-8 -*- # Copyright (c) 2012, PediaPress GmbH # See README.txt for additional licensing information. from lxml.builder import ElementMaker from lxml import etree E = ElementMaker() def get_css_link_element(): return E.link(rel='stylesheet', href='wp.css', type='text/css') def flatten_tree(tree): return etree.tostring(tree, pretty_print=True, encoding='utf-8', xml_declaration=True, method='xml', doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">' ) def xhtml_page(title='', body_content=None, flatten=True): head = E.head( E.meta({'http-equiv':"Content-Type", 'content': "application/xhtml+xml; charset=utf-8"}), E.title(title), get_css_link_element(), ) # add styles in case body = E.body() tree = E.html({'xmlns':'http://www.w3.org/1999/xhtml'}, head, body, ) for element in body_content: body.append(element) if flatten: return flatten_tree(tree) else: return tree
27.142857
133
0.527068
ace810a25cb1fedcd4ee99223dbb3d6ab71af770
5,581
py
Python
tests/user_tests.py
FFWiki/intangir-bot
6398b01cf961c6e54f28707f45229be8ac516738
[ "MIT" ]
2
2017-09-16T09:12:31.000Z
2017-09-19T19:12:32.000Z
tests/user_tests.py
FFWiki/intangir-bot
6398b01cf961c6e54f28707f45229be8ac516738
[ "MIT" ]
56
2016-12-13T04:57:36.000Z
2017-11-24T10:05:41.000Z
tests/user_tests.py
magul/pywikibot-core
4874edc0f3f314108bcd25486d9df817da8457fe
[ "MIT" ]
1
2018-10-04T01:42:31.000Z
2018-10-04T01:42:31.000Z
# -*- coding: utf-8 -*- """Tests for the User page.""" # # (C) Pywikibot team, 2016-2018 # # Distributed under the terms of the MIT license. # from __future__ import absolute_import, unicode_literals import pywikibot from pywikibot.tools import suppress_warnings from pywikibot import User from tests.aspects import TestCase, unittest class TestUserClass(TestCase): """Test User class.""" family = 'wikipedia' code = 'de' def test_registered_user(self): """Test registered user.""" user = User(self.site, 'Xqt') with suppress_warnings('pywikibot.page.User.name', DeprecationWarning): self.assertEqual(user.name(), user.username) self.assertEqual(user.title(withNamespace=False), user.username) self.assertTrue(user.isRegistered()) self.assertFalse(user.isAnonymous()) self.assertIsInstance(user.registration(), pywikibot.Timestamp) self.assertGreater(user.editCount(), 0) self.assertFalse(user.isBlocked()) self.assertTrue(user.isEmailable()) self.assertEqual(user.gender(), 'unknown') self.assertIn('userid', user.getprops()) self.assertEqual(user.getprops()['userid'], 287832) self.assertEqual(user.pageid, 6927779) self.assertEqual(user.getUserPage(), pywikibot.Page(self.site, 'Benutzer:Xqt')) self.assertEqual(user.getUserPage(subpage='pwb'), pywikibot.Page(self.site, 'Benutzer:Xqt/pwb')) self.assertEqual(user.getUserTalkPage(), pywikibot.Page(self.site, 'Benutzer Diskussion:Xqt')) self.assertEqual(user.getUserTalkPage(subpage='pwb'), pywikibot.Page(self.site, 'Benutzer Diskussion:Xqt/pwb')) self.assertTrue(user.is_thankable) contribs = user.contributions(total=10) self.assertEqual(len(list(contribs)), 10) self.assertTrue(all(isinstance(contrib, tuple) for contrib in contribs)) self.assertTrue(all('user' in contrib and contrib['user'] == user.username for contrib in contribs)) self.assertIn('user', user.groups()) self.assertIn('edit', user.rights()) def test_registered_user_without_timestamp(self): """Test registered user when registration timestamp is None.""" user = 
User(self.site, 'Ulfb') self.assertTrue(user.isRegistered()) self.assertFalse(user.isAnonymous()) self.assertIsNone(user.registration()) self.assertIsNone(user.getprops()['registration']) self.assertGreater(user.editCount(), 0) self.assertEqual(user.gender(), 'male') self.assertIn('userid', user.getprops()) self.assertTrue(user.is_thankable) def test_female_user(self): """Test female user.""" user = User(self.site, 'Alraunenstern') self.assertTrue(user.isRegistered()) self.assertFalse(user.isAnonymous()) self.assertGreater(user.editCount(), 0) self.assertEqual(user.gender(), 'female') self.assertIn('userid', user.getprops()) self.assertTrue(user.is_thankable) def test_anonymous_user(self): """Test registered user.""" user = User(self.site, '123.45.67.89') with suppress_warnings('pywikibot.page.User.name', DeprecationWarning): self.assertEqual(user.name(), user.username) self.assertEqual(user.title(withNamespace=False), user.username) self.assertFalse(user.isRegistered()) self.assertTrue(user.isAnonymous()) self.assertIsNone(user.registration()) self.assertFalse(user.isEmailable()) self.assertEqual(user.gender(), 'unknown') self.assertIn('invalid', user.getprops()) self.assertFalse(user.is_thankable) def test_unregistered_user(self): """Test unregistered user.""" user = User(self.site, 'This user name is not registered yet') with suppress_warnings('pywikibot.page.User.name', DeprecationWarning): self.assertEqual(user.name(), user.username) self.assertEqual(user.title(withNamespace=False), user.username) self.assertFalse(user.isRegistered()) self.assertFalse(user.isAnonymous()) self.assertIsNone(user.registration()) self.assertFalse(user.isEmailable()) self.assertEqual(user.gender(), 'unknown') self.assertIn('missing', user.getprops()) self.assertFalse(user.is_thankable) def test_invalid_user(self): """Test invalid user.""" user = User(self.site, 'Invalid char\x9f in Name') with suppress_warnings('pywikibot.page.User.name', DeprecationWarning): 
self.assertEqual(user.name(), user.username) self.assertEqual(user.title(withNamespace=False), user.username) self.assertFalse(user.isRegistered()) self.assertFalse(user.isAnonymous()) self.assertIsNone(user.registration()) self.assertFalse(user.isEmailable()) self.assertEqual(user.gender(), 'unknown') self.assertIn('invalid', user.getprops()) self.assertFalse(user.is_thankable) def test_bot_user(self): """Test bot user.""" user = User(self.site, 'Xqbot') self.assertIn('bot', user.groups()) self.assertFalse(user.is_thankable) if __name__ == '__main__': # pragma: no cover try: unittest.main() except SystemExit: pass
40.737226
79
0.640208
ace811142cf9fbd43baf7448def130169f5d6ce8
9,384
py
Python
quotefix/attribution.py
robertklep/quotefixformac
f6439cd8c9e47966c071df7590f4fc6a793ed8a2
[ "Unlicense" ]
128
2015-01-19T09:33:52.000Z
2022-01-16T02:58:30.000Z
quotefix/attribution.py
robertklep/quotefixformac
f6439cd8c9e47966c071df7590f4fc6a793ed8a2
[ "Unlicense" ]
67
2015-02-06T11:06:56.000Z
2022-02-16T15:50:23.000Z
quotefix/attribution.py
robertklep/quotefixformac
f6439cd8c9e47966c071df7590f4fc6a793ed8a2
[ "Unlicense" ]
17
2015-06-03T08:48:56.000Z
2022-02-14T09:36:01.000Z
from AppKit import NSRunAlertPanel from objc import Category, lookUpClass from datetime import datetime from logger import logger from quotefix.utils import swizzle, SimpleTemplate from quotefix.pyratemp import Template from quotefix.messagetypes import * from quotefix.attributionclasses import * import re, platform, traceback # Mavericks try: Message = lookUpClass('MCMessage') except: from AppKit import Message # patch MessageHeaders class to return empty attributions with forwards try: MCMessageHeaders = lookUpClass('MCMessageHeaders') class MCMessageHeaders(Category(MCMessageHeaders)): @classmethod def registerQuoteFixApplication(cls, app): cls.app = app @swizzle(MCMessageHeaders, 'htmlStringShowingHeaderDetailLevel:useBold:useGray:') def htmlStringShowingHeaderDetailLevel_useBold_useGray_(self, original, level, bold, gray): if self.app.use_custom_forwarding_attribution and self.app.remove_apple_mail_forward_attribution: return '' return original(self, level, bold, gray) # Yosemite and up @swizzle(MCMessageHeaders, 'htmlStringUseBold:useGray:') def htmlStringUseBold_useGray_(self, original, bold, gray): if self.app.use_custom_forwarding_attribution and self.app.remove_apple_mail_forward_attribution: return '' return original(self, bold, gray) MessageHeaders = MCMessageHeaders except: from AppKit import MessageHeaders class MessageHeaders(Category(MessageHeaders)): @classmethod def registerQuoteFixApplication(cls, app): cls.app = app @swizzle(MessageHeaders, 'htmlStringShowingHeaderDetailLevel:useBold:useGray:') def htmlStringShowingHeaderDetailLevel_useBold_useGray_(self, original, level, bold, gray): if self.app.use_custom_forwarding_attribution and self.app.remove_apple_mail_forward_attribution: return '' return original(self, level, bold, gray) class CustomizedAttribution: """ Provide customized reply/sendagain/forward attributions """ @classmethod def registerQuoteFixApplication(cls, app): cls.app = app @classmethod def customize_reply(cls, app, editor, dom, 
reply, inreplyto): return cls.customize_attribution( # grab the original attribution string from the # Message class, so we can replace it with a # customized version of it. original = Message.replyPrefixWithSpacer_(False), editor = editor, dom = dom, reply = reply, inreplyto = inreplyto, template = app.custom_reply_attribution, messagetype = REPLY ) @classmethod def customize_sendagain(cls, app, editor, dom, reply, inreplyto): return cls.customize_attribution( original = None, editor = editor, dom = dom, reply = reply, inreplyto = inreplyto, template = app.custom_sendagain_attribution, messagetype = SENDAGAIN ) @classmethod def customize_forward(cls, app, editor, dom, reply, inreplyto): return cls.customize_attribution( original = Message.forwardedMessagePrefixWithSpacer_(False), editor = editor, dom = dom, reply = reply, inreplyto = inreplyto, template = app.custom_forwarding_attribution, messagetype = FORWARD ) @classmethod def customize_attribution(cls, original, editor, dom, reply, inreplyto, template, messagetype): is_forward = messagetype == FORWARD is_reply = messagetype == REPLY is_sendagain = messagetype == SENDAGAIN # create matcher for matching original attribution (and replace # nsbp's with normal spaces) if original: original = original.replace(u'\xa0', ' ').strip() original = original.replace('(', r'\(').replace(')', r'\)') original = re.sub(r'%\d+\$\@', '.*?', original) original = re.sub(r'\s+', '(?:\\s|&nbsp;)+', original) original = original + r'(?=[<\s])' matcher = re.compile(original) else: matcher = None # rich text message? is_rich = editor.backEnd().containsRichText() # should attribution be treated as HTML? 
is_html = (is_forward and cls.app.custom_forwarding_is_html) or \ (is_sendagain and cls.app.custom_sendagain_is_html) or \ (is_reply and cls.app.custom_reply_is_html) # check if message is rich text with HTML-attribution if is_html and not is_rich: if (is_forward and cls.app.custom_forwarding_convert_to_rich) or \ (is_sendagain and cls.app.custom_sendagain_convert_to_rich) or \ (is_reply and cls.app.custom_reply_convert_to_rich): editor.makeRichText_(editor) elif not cls.app.dont_show_html_attribution_warning: idx = NSRunAlertPanel( "QuoteFix warning", "You are using an HTML-attribution, but the current message format is plain text.\n\n" + "Unless you convert to rich text, the HTML-formatting will be lost when sending the message.", "OK", "Don't show this warning again", None ) if idx == 0: cls.app.dont_show_html_attribution_warning = True # render attribution attribution = cls.render_attribution( reply = reply, inreplyto = inreplyto, template = template, is_html = is_html, ) # replace leading whitespace with non-breaking spaces attribution = re.sub(r'(?m)^( +)' , lambda m: u'\u00a0' * len(m.group(1)), attribution) attribution = re.sub(r'(?m)^(\t+)', lambda m: u'\u00a0\u00a0' * len(m.group(1)), attribution) # replace newlines with hard linebreaks attribution = attribution.replace('\n', '<br/>') # Get HTML contents of e-mail. root = dom.documentElement() html = root.innerHTML() # Fix attributions for Yosemite and up osMinorVersion = int(platform.mac_ver()[0].split('.')[1]) if osMinorVersion >= 10: # move <blockquote> one level down html = re.sub(r'(?i)(<blockquote.*?>)(.*?)(<br.*?>)+', r'\2\1', html, count = 1) # Special case: Mail doesn't include an attribution for Send Again messages, # so we'll just add a customized attribution right after the <body> element. if is_sendagain or matcher == None: # TODO: limit quote level! 
html = re.sub(r'(?i)(?P<element><\s?body.*?>)', r'\g<element>' + attribution, html, count = 1) elif matcher: html = matcher.sub(attribution, html, count = 1) # Restore HTML of root element. root.setInnerHTML_(html) # TODO: increase quote level of attribution? # if (is_forward and cls.app.custom_forwarding_increase_quotelevel) or \ # (is_reply and cls.app.custom_reply_increase_quotelevel): # copy = copynode.cloneNode_(True) # copynode.parentNode().removeChild_(copynode) # blockquote = root.firstDescendantBlockQuote() # blockquote.insertBefore_refChild_(copy, blockquote.childNodes().item_(0)) return True @classmethod def render_attribution(cls, reply, inreplyto, template, is_html): # expand template and return it return cls.render_with_params( template, cls.setup_params(reply, inreplyto), is_html ) @classmethod def render_with_params(cls, template, params, is_html): # hmm... template = template.replace('message.from', 'message.From') template = template.replace('response.from', 'response.From') template = template.replace('recipients.all', 'recipients.All') # escape some characters when not using HTML-mode if not is_html: template = template.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;') # templating enabled? if cls.app.custom_attribution_allow_templating: # try to expand a complex template first try: return Template(string = template, data = params)() except Exception: logger.debug(traceback.format_exc()) return "<i>&lt;A templating error occured, please check your template for errors&gt;</i>" # simple template return SimpleTemplate(template).substitute(params) @classmethod def setup_params(cls, reply, inreplyto): return { 'message' : QFMessage(inreplyto), 'response' : QFMessage(reply), # 'now'? }
40.978166
114
0.599851
ace811481f54f73612f91fc57a889c32778053c7
11,858
py
Python
module_4/performance_diagrams.py
pankajcivil/ams-ml-python-course
1841dccb7621b013b65596946fe998cc2ad465a7
[ "MIT" ]
3
2018-12-27T03:57:11.000Z
2021-02-04T02:10:13.000Z
module_4/performance_diagrams.py
kbSSR/ams-ml-python-course
1841dccb7621b013b65596946fe998cc2ad465a7
[ "MIT" ]
null
null
null
module_4/performance_diagrams.py
kbSSR/ams-ml-python-course
1841dccb7621b013b65596946fe998cc2ad465a7
[ "MIT" ]
1
2021-02-12T19:34:32.000Z
2021-02-12T19:34:32.000Z
"""Methods for plotting performance diagram.""" import numpy import matplotlib.colors import matplotlib.pyplot as pyplot DEFAULT_LINE_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255 DEFAULT_LINE_WIDTH = 3 DEFAULT_BIAS_LINE_COLOUR = numpy.full(3, 152. / 255) DEFAULT_BIAS_LINE_WIDTH = 2 LEVELS_FOR_CSI_CONTOURS = numpy.linspace(0, 1, num=11, dtype=float) LEVELS_FOR_BIAS_CONTOURS = numpy.array( [0.25, 0.5, 0.75, 1., 1.5, 2., 3., 5.]) BIAS_STRING_FORMAT = '%.2f' BIAS_LABEL_PADDING_PX = 10 FIGURE_WIDTH_INCHES = 15 FIGURE_HEIGHT_INCHES = 15 FONT_SIZE = 30 pyplot.rc('font', size=FONT_SIZE) pyplot.rc('axes', titlesize=FONT_SIZE) pyplot.rc('axes', labelsize=FONT_SIZE) pyplot.rc('xtick', labelsize=FONT_SIZE) pyplot.rc('ytick', labelsize=FONT_SIZE) pyplot.rc('legend', fontsize=FONT_SIZE) pyplot.rc('figure', titlesize=FONT_SIZE) def _get_sr_pod_grid(success_ratio_spacing=0.01, pod_spacing=0.01): """Creates grid in SR-POD (success ratio / probability of detection) space. M = number of rows (unique POD values) in grid N = number of columns (unique success ratios) in grid :param success_ratio_spacing: Spacing between grid cells in adjacent columns. :param pod_spacing: Spacing between grid cells in adjacent rows. :return: success_ratio_matrix: M-by-N numpy array of success ratios. Success ratio increases with column index. :return: pod_matrix: M-by-N numpy array of POD values. POD decreases with row index. """ num_success_ratios = 1 + int(numpy.ceil(1. / success_ratio_spacing)) num_pod_values = 1 + int(numpy.ceil(1. / pod_spacing)) unique_success_ratios = numpy.linspace(0., 1., num=num_success_ratios) unique_pod_values = numpy.linspace(0., 1., num=num_pod_values)[::-1] return numpy.meshgrid(unique_success_ratios, unique_pod_values) def _csi_from_sr_and_pod(success_ratio_array, pod_array): """Computes CSI (critical success index) from success ratio and POD. POD = probability of detection :param success_ratio_array: numpy array (any shape) of success ratios. 
:param pod_array: numpy array (same shape) of POD values. :return: csi_array: numpy array (same shape) of CSI values. """ return (success_ratio_array ** -1 + pod_array ** -1 - 1.) ** -1 def _bias_from_sr_and_pod(success_ratio_array, pod_array): """Computes frequency bias from success ratio and POD. POD = probability of detection :param success_ratio_array: numpy array (any shape) of success ratios. :param pod_array: numpy array (same shape) of POD values. :return: frequency_bias_array: numpy array (same shape) of frequency biases. """ return pod_array / success_ratio_array def _get_csi_colour_scheme(): """Returns colour scheme for CSI (critical success index). :return: colour_map_object: Colour scheme (instance of `matplotlib.colors.ListedColormap`). :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`, defining the scale of the colour map. """ this_colour_map_object = pyplot.cm.Blues this_colour_norm_object = matplotlib.colors.BoundaryNorm( LEVELS_FOR_CSI_CONTOURS, this_colour_map_object.N) rgba_matrix = this_colour_map_object(this_colour_norm_object( LEVELS_FOR_CSI_CONTOURS)) colour_list = [ rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0]) ] colour_map_object = matplotlib.colors.ListedColormap(colour_list) colour_map_object.set_under(numpy.array([1, 1, 1])) colour_norm_object = matplotlib.colors.BoundaryNorm( LEVELS_FOR_CSI_CONTOURS, colour_map_object.N) return colour_map_object, colour_norm_object def _add_colour_bar( axes_object, colour_map_object, values_to_colour, min_colour_value, max_colour_value, colour_norm_object=None, orientation_string='vertical', extend_min=True, extend_max=True, fraction_of_axis_length=1., font_size=FONT_SIZE): """Adds colour bar to existing axes. :param axes_object: Existing axes (instance of `matplotlib.axes._subplots.AxesSubplot`). :param colour_map_object: Colour scheme (instance of `matplotlib.pyplot.cm`). :param values_to_colour: numpy array of values to colour. 
:param min_colour_value: Minimum value in colour map. :param max_colour_value: Max value in colour map. :param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`, defining the scale of the colour map. If `colour_norm_object is None`, will assume that scale is linear. :param orientation_string: Orientation of colour bar ("vertical" or "horizontal"). :param extend_min: Boolean flag. If True, the bottom of the colour bar will have an arrow. If False, it will be a flat line, suggesting that lower values are not possible. :param extend_max: Same but for top of colour bar. :param fraction_of_axis_length: Fraction of axis length (y-axis if orientation is "vertical", x-axis if orientation is "horizontal") occupied by colour bar. :param font_size: Font size for labels on colour bar. :return: colour_bar_object: Colour bar (instance of `matplotlib.pyplot.colorbar`) created by this method. """ if colour_norm_object is None: colour_norm_object = matplotlib.colors.Normalize( vmin=min_colour_value, vmax=max_colour_value, clip=False) scalar_mappable_object = pyplot.cm.ScalarMappable( cmap=colour_map_object, norm=colour_norm_object) scalar_mappable_object.set_array(values_to_colour) if extend_min and extend_max: extend_string = 'both' elif extend_min: extend_string = 'min' elif extend_max: extend_string = 'max' else: extend_string = 'neither' if orientation_string == 'horizontal': padding = 0.075 else: padding = 0.05 colour_bar_object = pyplot.colorbar( ax=axes_object, mappable=scalar_mappable_object, orientation=orientation_string, pad=padding, extend=extend_string, shrink=fraction_of_axis_length) colour_bar_object.ax.tick_params(labelsize=font_size) return colour_bar_object def _get_points_in_perf_diagram(observed_labels, forecast_probabilities): """Creates points for performance diagram. E = number of examples T = number of binarization thresholds :param observed_labels: length-E numpy array of class labels (integers in 0...1). 
:param forecast_probabilities: length-E numpy array with forecast probabilities of label = 1. :return: pod_by_threshold: length-T numpy array of POD (probability of detection) values. :return: success_ratio_by_threshold: length-T numpy array of success ratios. """ assert numpy.all(numpy.logical_or( observed_labels == 0, observed_labels == 1 )) assert numpy.all(numpy.logical_and( forecast_probabilities >= 0, forecast_probabilities <= 1 )) observed_labels = observed_labels.astype(int) binarization_thresholds = numpy.linspace(0, 1, num=1001, dtype=float) num_thresholds = len(binarization_thresholds) pod_by_threshold = numpy.full(num_thresholds, numpy.nan) success_ratio_by_threshold = numpy.full(num_thresholds, numpy.nan) for k in range(num_thresholds): these_forecast_labels = ( forecast_probabilities >= binarization_thresholds[k] ).astype(int) this_num_hits = numpy.sum(numpy.logical_and( these_forecast_labels == 1, observed_labels == 1 )) this_num_false_alarms = numpy.sum(numpy.logical_and( these_forecast_labels == 1, observed_labels == 0 )) this_num_misses = numpy.sum(numpy.logical_and( these_forecast_labels == 0, observed_labels == 1 )) try: pod_by_threshold[k] = ( float(this_num_hits) / (this_num_hits + this_num_misses) ) except ZeroDivisionError: pass try: success_ratio_by_threshold[k] = ( float(this_num_hits) / (this_num_hits + this_num_false_alarms) ) except ZeroDivisionError: pass pod_by_threshold = numpy.array([1.] + pod_by_threshold.tolist() + [0.]) success_ratio_by_threshold = numpy.array( [0.] + success_ratio_by_threshold.tolist() + [1.] ) return pod_by_threshold, success_ratio_by_threshold def plot_performance_diagram( observed_labels, forecast_probabilities, line_colour=DEFAULT_LINE_COLOUR, line_width=DEFAULT_LINE_WIDTH, bias_line_colour=DEFAULT_BIAS_LINE_COLOUR, bias_line_width=DEFAULT_BIAS_LINE_WIDTH): """Plots performance diagram. E = number of examples :param observed_labels: length-E numpy array of class labels (integers in 0...1). 
:param forecast_probabilities: length-E numpy array with forecast probabilities of label = 1. :param line_colour: Colour (in any format accepted by `matplotlib.colors`). :param line_width: Line width (real positive number). :param bias_line_colour: Colour of contour lines for frequency bias. :param bias_line_width: Width of contour lines for frequency bias. :return: pod_by_threshold: See doc for `_get_points_in_perf_diagram`. detection) values. :return: success_ratio_by_threshold: Same. """ pod_by_threshold, success_ratio_by_threshold = _get_points_in_perf_diagram( observed_labels=observed_labels, forecast_probabilities=forecast_probabilities) _, axes_object = pyplot.subplots( 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES) ) success_ratio_matrix, pod_matrix = _get_sr_pod_grid() csi_matrix = _csi_from_sr_and_pod(success_ratio_matrix, pod_matrix) frequency_bias_matrix = _bias_from_sr_and_pod( success_ratio_matrix, pod_matrix) this_colour_map_object, this_colour_norm_object = _get_csi_colour_scheme() pyplot.contourf( success_ratio_matrix, pod_matrix, csi_matrix, LEVELS_FOR_CSI_CONTOURS, cmap=this_colour_map_object, norm=this_colour_norm_object, vmin=0., vmax=1., axes=axes_object) colour_bar_object = _add_colour_bar( axes_object=axes_object, colour_map_object=this_colour_map_object, colour_norm_object=this_colour_norm_object, values_to_colour=csi_matrix, min_colour_value=0., max_colour_value=1., orientation_string='vertical', extend_min=False, extend_max=False) colour_bar_object.set_label('CSI (critical success index)') bias_colour_tuple = () for _ in range(len(LEVELS_FOR_BIAS_CONTOURS)): bias_colour_tuple += (bias_line_colour,) bias_contour_object = pyplot.contour( success_ratio_matrix, pod_matrix, frequency_bias_matrix, LEVELS_FOR_BIAS_CONTOURS, colors=bias_colour_tuple, linewidths=bias_line_width, linestyles='dashed', axes=axes_object) pyplot.clabel( bias_contour_object, inline=True, inline_spacing=BIAS_LABEL_PADDING_PX, fmt=BIAS_STRING_FORMAT, 
fontsize=FONT_SIZE) nan_flags = numpy.logical_or( numpy.isnan(success_ratio_by_threshold), numpy.isnan(pod_by_threshold) ) if not numpy.all(nan_flags): real_indices = numpy.where(numpy.invert(nan_flags))[0] axes_object.plot( success_ratio_by_threshold[real_indices], pod_by_threshold[real_indices], color=line_colour, linestyle='solid', linewidth=line_width) axes_object.set_xlabel('Success ratio (1 - FAR)') axes_object.set_ylabel('POD (probability of detection)') axes_object.set_xlim(0., 1.) axes_object.set_ylim(0., 1.) return pod_by_threshold, success_ratio_by_threshold
37.289308
80
0.715129
ace8123c53df0e95fb2ccc671f4ec531759c607b
794
py
Python
tests/test_apps/test_calibrations.py
monash-emu/AuTuMN
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
[ "BSD-2-Clause-FreeBSD" ]
14
2020-03-11T06:15:30.000Z
2022-03-09T03:38:35.000Z
tests/test_apps/test_calibrations.py
monash-emu/AuTuMN
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
[ "BSD-2-Clause-FreeBSD" ]
96
2020-01-29T05:10:29.000Z
2022-03-31T01:48:46.000Z
tests/test_apps/test_calibrations.py
monash-emu/AuTuMN
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
[ "BSD-2-Clause-FreeBSD" ]
10
2020-04-24T00:38:00.000Z
2021-08-19T16:19:03.000Z
import pytest

from autumn.settings import Models
from autumn.tools.project.project import _PROJECTS, get_project


def _calibs_for(model):
    # Pair every registered project name under `model` with that model,
    # producing the (project_name, model_name) tuples pytest will expand.
    return [(project_name, model) for project_name in _PROJECTS[model]]


COVID_PROJECTS = list(_PROJECTS[Models.COVID_19].keys())
COVID_CALIBS = _calibs_for(Models.COVID_19)
TB_PROJECTS = list(_PROJECTS[Models.TB].keys())
TB_CALIBS = _calibs_for(Models.TB)
CALIBS = COVID_CALIBS + TB_CALIBS


@pytest.mark.github_only
@pytest.mark.calibrate_models
@pytest.mark.parametrize("project_name, model_name", CALIBS)
def test_calibration(project_name, model_name):
    """
    Calibration smoke test - make sure everything can run for 10 seconds without exploding.
    """
    project = get_project(model_name, project_name)
    project.calibrate(max_seconds=10, chain_idx=1, num_chains=1)
36.090909
91
0.777078
ace8127062e722888f7a7bee4a69fabdb9728515
2,826
py
Python
test/onnx/test_pytorch_jit_onnx.py
stungkit/pytorch
0f05e398705bf15406bce79f7ee57d3935ad2abd
[ "Intel" ]
null
null
null
test/onnx/test_pytorch_jit_onnx.py
stungkit/pytorch
0f05e398705bf15406bce79f7ee57d3935ad2abd
[ "Intel" ]
1
2022-01-10T18:39:28.000Z
2022-01-10T19:15:57.000Z
test/onnx/test_pytorch_jit_onnx.py
stungkit/pytorch
0f05e398705bf15406bce79f7ee57d3935ad2abd
[ "Intel" ]
null
null
null
# Owner(s): ["module: onnx"] import unittest import onnxruntime import torch from torch._C import parse_ir from torch.onnx import verification def _jit_graph_to_onnx_model(graph, operator_export_type, opset_version): r""" This function exports torch::jit::Graph object to serialized ONNX ModelProto. This function is for testing purpose. It only keeps the essential parts for IR graph conversions. It also does not interact with actual PyTorch modules nor PyTorch tensor inputs. """ from torch.onnx.symbolic_helper import _set_onnx_shape_inference, _set_opset_version from torch.onnx.utils import _optimize_graph # Shape inference is required because some ops' symbolic functions # generate sub-graphs based on inputs' types. _set_onnx_shape_inference(True) _set_opset_version(opset_version) graph = _optimize_graph(graph, operator_export_type, params_dict={}) proto, _, _, _ = graph._export_onnx( {}, opset_version, {}, False, operator_export_type, False, False, {}, True, "", {}, ) return proto class _TestJITIRToONNX: """Abstract base class for test cases. Intentionally not a sub-class of unittest.TestCase so that unittest / pytest don't run it directly. unitest.TestCase is mixed in as another base class when creating concrete sub-types. See MakeTestCase(). 
""" opset_version = -1 # Sub-classes must override ort_providers = ["CPUExecutionProvider"] def run_test(self, graph_ir, example_inputs): graph = parse_ir(graph_ir) jit_outs = torch._C._jit_interpret_graph(graph, example_inputs) onnx_proto = _jit_graph_to_onnx_model( graph, torch.onnx.OperatorExportTypes.ONNX, self.opset_version ) ort_sess = onnxruntime.InferenceSession( onnx_proto, providers=self.ort_providers ) ort_outs = verification._run_ort(ort_sess, example_inputs) verification._compare_ort_pytorch_outputs( ort_outs, jit_outs, rtol=1e-3, atol=1e-7 ) def test_example_ir(self): graph_ir = """ graph(%1 : Float(2, 3), %2 : Float(2, 3)): %3 : int = prim::Constant[value=1]() %4 : Float(2, 3) = aten::add(%1, %2, %3) return (%4) """ a = torch.randn(2, 3) b = torch.randn(2, 3) self.run_test(graph_ir, (a, b)) def MakeTestCase(opset_version: int) -> type: name = f"TestJITIRToONNX_opset{opset_version}" return type( str(name), (unittest.TestCase,), dict(_TestJITIRToONNX.__dict__, opset_version=opset_version), ) TestJITIRToONNX_opset14 = MakeTestCase(14) if __name__ == "__main__": unittest.main()
29.134021
88
0.654636
ace812ad9e6ebb08d04a73c1473cc9a930f81d64
6,446
py
Python
ibis/expr/tests/test_format.py
nubank/ibis
3423ab8ece7698491d491e898f739504fb3580be
[ "Apache-2.0" ]
3
2017-04-17T14:20:36.000Z
2019-06-04T19:07:19.000Z
ibis/expr/tests/test_format.py
nubank/ibis
3423ab8ece7698491d491e898f739504fb3580be
[ "Apache-2.0" ]
1
2016-11-29T13:31:35.000Z
2016-11-29T13:31:35.000Z
ibis/expr/tests/test_format.py
nubank/ibis
3423ab8ece7698491d491e898f739504fb3580be
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ibis from ibis.compat import unittest from ibis.expr.format import ExprFormatter from ibis.expr.tests.mocks import MockConnection class TestExprFormatting(unittest.TestCase): # Uncertain about how much we want to commit to unit tests around the # particulars of the output at the moment. def setUp(self): self.schema = [ ('a', 'int8'), ('b', 'int16'), ('c', 'int32'), ('d', 'int64'), ('e', 'float'), ('f', 'double'), ('g', 'string'), ('h', 'boolean') ] self.schema_dict = dict(self.schema) self.table = ibis.table(self.schema) self.con = MockConnection() def test_format_table_column(self): # GH #507 result = repr(self.table.f) assert 'Column[array(double)]' in result def test_format_projection(self): # This should produce a ref to the projection proj = self.table[['c', 'a', 'f']] repr(proj['a']) def test_table_type_output(self): foo = ibis.table( [ ('job', 'string'), ('dept_id', 'string'), ('year', 'int32'), ('y', 'double') ], 'foo') expr = foo.dept_id == foo.view().dept_id result = repr(expr) assert 'SelfReference[table]' in result assert 'UnboundTable[table]' in result def test_memoize_aggregate_correctly(self): table = self.table agg_expr = (table['c'].sum() / table['c'].mean() - 1).name('analysis') agg_exprs = [table['a'].sum().name('sum(a)'), table['b'].mean().name('mean(b)'), agg_expr] result = table.aggregate(agg_exprs, by=['g']) formatter = ExprFormatter(result) formatted = 
formatter.get_result() alias = formatter.memo.get_alias(table.op()) assert formatted.count(alias) == 7 def test_aggregate_arg_names(self): # Not sure how to test this *well* t = self.table by_exprs = [t.g.name('key1'), t.f.round().name('key2')] agg_exprs = [t.c.sum().name('c'), t.d.mean().name('d')] expr = self.table.group_by(by_exprs).aggregate(agg_exprs) result = repr(expr) assert 'metrics' in result assert 'by' in result def test_format_multiple_join_with_projection(self): # Star schema with fact table table = ibis.table([ ('c', 'int32'), ('f', 'double'), ('foo_id', 'string'), ('bar_id', 'string'), ], 'one') table2 = ibis.table([ ('foo_id', 'string'), ('value1', 'double') ], 'two') table3 = ibis.table([ ('bar_id', 'string'), ('value2', 'double') ], 'three') filtered = table[table['f'] > 0] pred1 = filtered['foo_id'] == table2['foo_id'] pred2 = filtered['bar_id'] == table3['bar_id'] j1 = filtered.left_join(table2, [pred1]) j2 = j1.inner_join(table3, [pred2]) # Project out the desired fields view = j2[[filtered, table2['value1'], table3['value2']]] # it works! 
repr(view) def test_memoize_database_table(self): table = self.con.table('test1') table2 = self.con.table('test2') filter_pred = table['f'] > 0 table3 = table[filter_pred] join_pred = table3['g'] == table2['key'] joined = table2.inner_join(table3, [join_pred]) met1 = (table3['f'] - table2['value']).mean().name('foo') result = joined.aggregate([met1, table3['f'].sum().name('bar')], by=[table3['g'], table2['key']]) formatted = repr(result) assert formatted.count('test1') == 1 assert formatted.count('test2') == 1 def test_memoize_filtered_table(self): airlines = ibis.table([('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')], 'airlines') dests = ['ORD', 'JFK', 'SFO'] t = airlines[airlines.dest.isin(dests)] delay_filter = t.dest.topk(10, by=t.arrdelay.mean()) result = repr(delay_filter) assert result.count('Selection') == 1 def test_memoize_insert_sort_key(self): table = self.con.table('airlines') t = table['arrdelay', 'dest'] expr = (t.group_by('dest') .mutate(dest_avg=t.arrdelay.mean(), dev=t.arrdelay - t.arrdelay.mean())) worst = (expr[expr.dev.notnull()] .sort_by(ibis.desc('dev')) .limit(10)) result = repr(worst) assert result.count('airlines') == 1 def test_named_value_expr_show_name(self): expr = self.table.f * 2 expr2 = expr.name('baz') # it works! repr(expr) result2 = repr(expr2) # not really committing to a particular output yet assert 'baz' in result2 def test_memoize_filtered_tables_in_join(self): # related: GH #667 purchases = ibis.table([('region', 'string'), ('kind', 'string'), ('user', 'int64'), ('amount', 'double')], 'purchases') metric = purchases.amount.sum().name('total') agged = (purchases.group_by(['region', 'kind']) .aggregate(metric)) left = agged[agged.kind == 'foo'] right = agged[agged.kind == 'bar'] cond = left.region == right.region joined = (left.join(right, cond) [left, right.total.name('right_total')]) result = repr(joined) # Join, and one for each aggregation assert result.count('predicates') == 3
31.443902
78
0.544213
ace813a54e35a1bed4760a57d2c922ba2e5ce85c
425
py
Python
dj_dpv/core/models.py
axeliodiaz/dpv
56fce55a374594d2164f7d7fd23ac7c4242e4304
[ "MIT" ]
null
null
null
dj_dpv/core/models.py
axeliodiaz/dpv
56fce55a374594d2164f7d7fd23ac7c4242e4304
[ "MIT" ]
null
null
null
dj_dpv/core/models.py
axeliodiaz/dpv
56fce55a374594d2164f7d7fd23ac7c4242e4304
[ "MIT" ]
null
null
null
from django.db import models
from model_utils.models import TimeStampedModel
from model_utils import Choices


class Divisa(TimeStampedModel):
    """Abstract base for a currency quote: a price plus created/modified timestamps.

    `TimeStampedModel` supplies the `created` and `modified` fields used below.
    """

    # NOTE(review): max_digits=1000 is far beyond any realistic price; kept
    # as-is because narrowing it would require a schema migration — confirm intent.
    precio = models.DecimalField(max_digits=1000, decimal_places=3)

    class Meta:
        abstract = True

    def __str__(self):
        # Bug fix: the original only defined __unicode__, which Python 3 /
        # modern Django never calls, so instances rendered as "Divisa object".
        return "{}: {}".format(self.created, self.precio)

    # Python 2 compatibility alias (Django 1.x on py2 calls __unicode__).
    __unicode__ = __str__


class TipoDivisa(Divisa):
    """Concrete currency type with a human-readable name."""

    nombre = models.CharField(blank=True, max_length=100)
22.368421
67
0.731765
ace8155e9208c65aec62e43e890bf207e3029836
2,437
py
Python
tests/unittest/test_metrics.py
StrayBird-ATSH/gluon-nlp
5dc6b9c9fab9e99b155554a50466c514b879ea84
[ "Apache-2.0" ]
1
2021-06-17T12:59:25.000Z
2021-06-17T12:59:25.000Z
tests/unittest/test_metrics.py
StrayBird-ATSH/gluon-nlp
5dc6b9c9fab9e99b155554a50466c514b879ea84
[ "Apache-2.0" ]
3
2020-09-01T05:45:57.000Z
2020-10-22T23:14:20.000Z
tests/unittest/test_metrics.py
StrayBird-ATSH/gluon-nlp
5dc6b9c9fab9e99b155554a50466c514b879ea84
[ "Apache-2.0" ]
1
2020-09-04T22:28:31.000Z
2020-09-04T22:28:31.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import mxnet as mx import numpy as np from gluonnlp.metric import MaskedAccuracy, LengthNormalizedLoss from mxnet.test_utils import assert_almost_equal def test_acc(): pred = mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]]) label = mx.nd.array([0, 1, 1]) mask = mx.nd.array([1, 1, 0]) metric = MaskedAccuracy() metric.update([label], [pred], [mask]) _, acc = metric.get() matched = (np.argmax(pred.asnumpy(), axis=1) == label.asnumpy()) * mask.asnumpy() valid_count = mask.asnumpy().sum() expected_acc = 1.0 * matched.sum() / valid_count assert acc == expected_acc metric = MaskedAccuracy() metric.update([label], [pred]) _, acc = metric.get() matched = (np.argmax(pred.asnumpy(), axis=1) == label.asnumpy()) valid_count = len(label) expected_acc = 1.0 * matched.sum() / valid_count assert acc == expected_acc def test_normalized_loss(rtol=1e-5, atol=1e-5): tgt_valid_length = mx.nd.array([1, 3, 2, 7]) loss = mx.nd.array([1.1, 2.5, 3.8, 5.3]) metric = LengthNormalizedLoss() metric.update([0, tgt_valid_length], loss) _, metric_loss = metric.get() expected_loss = loss.asnumpy().sum() / tgt_valid_length.asnumpy().sum() assert_almost_equal(metric_loss, expected_loss, rtol=rtol, atol=atol) tgt_valid_length = 
mx.nd.array([8, 4, 2, 7]) loss = mx.nd.array([8.7, 2.3, 1.8, 9.3]) metric = LengthNormalizedLoss() metric.update([0, tgt_valid_length], loss) _, metric_loss = metric.get() expected_loss = loss.asnumpy().sum() / tgt_valid_length.asnumpy().sum() assert_almost_equal(metric_loss, expected_loss, rtol=rtol, atol=atol)
40.616667
85
0.694296
ace815a8e2da9436d60995da1f3f1250104fbbd6
4,168
py
Python
Non-React Stuff/alexa/lambda/skill_env/ask_sdk_model/dialog/dynamic_entities_directive.py
ReciPull/reciprogram
b8c7e4610f95c5beafad3c9880fc5beceec523e7
[ "MIT" ]
1
2019-09-16T19:13:13.000Z
2019-09-16T19:13:13.000Z
Non-React Stuff/alexa/lambda/skill_env/ask_sdk_model/dialog/dynamic_entities_directive.py
ReciPull/reciprogram
b8c7e4610f95c5beafad3c9880fc5beceec523e7
[ "MIT" ]
5
2021-03-09T03:30:14.000Z
2022-02-26T10:42:17.000Z
alexa/reciPullLambda/ask_sdk_model/dialog/dynamic_entities_directive.py
ReciPull/recipull.github.io
e6b800af02658bb7948297c4ddc1b7af6d978839
[ "MIT" ]
null
null
null
# coding: utf-8 # # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file # except in compliance with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # the specific language governing permissions and limitations under the License. # import pprint import re # noqa: F401 import six import typing from enum import Enum from ask_sdk_model.directive import Directive if typing.TYPE_CHECKING: from typing import Dict, List, Optional from datetime import datetime from ask_sdk_model.er.dynamic.update_behavior import UpdateBehavior from ask_sdk_model.er.dynamic.entity_list_item import EntityListItem class DynamicEntitiesDirective(Directive): """ :param update_behavior: :type update_behavior: (optional) ask_sdk_model.er.dynamic.update_behavior.UpdateBehavior :param types: :type types: (optional) list[ask_sdk_model.er.dynamic.entity_list_item.EntityListItem] """ deserialized_types = { 'object_type': 'str', 'update_behavior': 'ask_sdk_model.er.dynamic.update_behavior.UpdateBehavior', 'types': 'list[ask_sdk_model.er.dynamic.entity_list_item.EntityListItem]' } # type: Dict attribute_map = { 'object_type': 'type', 'update_behavior': 'updateBehavior', 'types': 'types' } # type: Dict def __init__(self, update_behavior=None, types=None): # type: (Optional[UpdateBehavior], Optional[List[EntityListItem]]) -> None """ :param update_behavior: :type update_behavior: (optional) ask_sdk_model.er.dynamic.update_behavior.UpdateBehavior :param types: :type types: (optional) list[ask_sdk_model.er.dynamic.entity_list_item.EntityListItem] """ self.__discriminator_value = "Dialog.UpdateDynamicEntities" # type: str self.object_type = self.__discriminator_value 
super(DynamicEntitiesDirective, self).__init__(object_type=self.__discriminator_value) self.update_behavior = update_behavior self.types = types def to_dict(self): # type: () -> Dict[str, object] """Returns the model properties as a dict""" result = {} # type: Dict for attr, _ in six.iteritems(self.deserialized_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x.value if isinstance(x, Enum) else x, value )) elif isinstance(value, Enum): result[attr] = value.value elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else (item[0], item[1].value) if isinstance(item[1], Enum) else item, value.items() )) else: result[attr] = value return result def to_str(self): # type: () -> str """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): # type: () -> str """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): # type: (object) -> bool """Returns true if both objects are equal""" if not isinstance(other, DynamicEntitiesDirective): return False return self.__dict__ == other.__dict__ def __ne__(self, other): # type: (object) -> bool """Returns true if both objects are not equal""" return not self == other
34.733333
97
0.62452
ace81624fc18860640a7bad3420f1b753274a888
2,828
py
Python
tests/test_round2.py
aperrin66/DAPPER
d9d09ed87ca58d59972296e317bfeea50ba6cdd0
[ "MIT" ]
225
2016-11-01T09:55:25.000Z
2022-03-31T06:03:28.000Z
tests/test_round2.py
aperrin66/DAPPER
d9d09ed87ca58d59972296e317bfeea50ba6cdd0
[ "MIT" ]
85
2018-02-09T03:13:18.000Z
2022-03-18T17:10:28.000Z
tests/test_round2.py
aperrin66/DAPPER
d9d09ed87ca58d59972296e317bfeea50ba6cdd0
[ "MIT" ]
92
2017-05-29T23:24:45.000Z
2022-03-31T06:03:29.000Z
"""Tests for `round2` and `round2sigfig`.""" import numpy as np import pytest from dapper.tools.rounding import round2, round2sigfig class ca(float): """Make `==` approximate. Example: >>> ca(1 + 1e-6) == 1 True This might be a roundabout way to execute `np.isclose`, but it is a fun way. """ def __new__(cls, val, tol=1e-5): """From <https://stackoverflow.com/q/35943789>.""" self = super().__new__(cls, val) self.tol = tol return self def __eq__(self, other): """Make equality comparison approximate.""" return np.isclose(self, other, self.tol) # Test cases for round2sigfig lst1 = [ (1 , 0, 0) , (11 , 0, 0) , (111 , 0, 0) , (1 , 1, 1) , (11 , 1, 10) , (111 , 1, 100) , (1 , 2, 1) , (11 , 2, 11) , (111 , 2, 110) , (1 , 3, 1) , (11 , 3, 11) , (111 , 3, 111) , (1 , 4, 1) , (11 , 4, 11) , (111 , 4, 111) , (1.11, 1, 1) , (1.11, 2, 1.1) , (1.11, 3, 1.11), (1.11, 4, 1.11), ] # Test cases for round2 lst2 = [ (1.2345, 1.0 , 1) , (12.345, 1.0 , 12) , (123.45, 1.0 , 123) , (1234.5, 1.0 , 1234) , (12345., 1.0 , 12345) , (1.2345, 9.0 , 1) , (12.345, 9.0 , 12) , (123.45, 9.0 , 123) , (1234.5, 9.0 , 1234) , (12345., 9.0 , 12345) , (1.2345, 10. , 0) , (12.345, 10. , 10) , (123.45, 10. , 120) , (1234.5, 10. , 1230) , (12345., 10. 
, 12340) , (1.2345, 0.1 , 1.2) , (12.345, 0.1 , 12.3) , (123.45, 0.1 , 123.4) , (1234.5, 0.1 , 1234.5), (12345., 0.1 , 12345) , (1.2345, 0.2 , 1.2) , (12.345, 0.2 , 12.3) , (123.45, 0.2 , 123.4) , (1234.5, 0.2 , 1234.5), (12345., 0.2 , 12345) , (0.1 , 0.3 , 0.1) , (0.2 , 0.3 , 0.2) , (0.3 , 0.3 , 0.3) , (1.65 , 1.234, 2.0) , (1.65 , 0.543, 1.6) , (1.87 , 0.543, 1.9) , (1.92 , 0.543, 1.9) , ] # For inspiration: # p = 99.3 # print(p) # for x in range(10): # x = 148.5 + .1*x # print(f"x: {x:.1f}:", round2(x, p)) @pytest.mark.parametrize("x, p, y", lst1) def test_round2sigfig(x, p, y): rounded = round2sigfig(x, p) desired = ca(y, 1e-9) assert rounded == desired @pytest.mark.parametrize("x, p, y", lst2) def test_round2(x, p, y): rounded = round2(x, p) desired = ca(y, 1e-9) assert rounded == desired if __name__ == "__main__": # Demonstrate `ca`. numbers = [ 1 + 1e-6, 1 + 1e-4, ] for x in numbers: print("\nx:", x) values = { "=": x, "≈": ca(x), } for mode, x2 in values.items(): print(f"x {mode} 1 {mode} x:", x2 == 1 == x2)
21.263158
59
0.433168
ace816c75bba2a9b074125f3c2bca25073c31d08
3,700
py
Python
CTK/Carousel.py
pigmej/CTK
f2e15a190bb5b0c3a6bba3baeeee896da43b97a6
[ "BSD-3-Clause" ]
1
2018-08-27T11:56:48.000Z
2018-08-27T11:56:48.000Z
CTK/Carousel.py
pigmej/CTK
f2e15a190bb5b0c3a6bba3baeeee896da43b97a6
[ "BSD-3-Clause" ]
null
null
null
CTK/Carousel.py
pigmej/CTK
f2e15a190bb5b0c3a6bba3baeeee896da43b97a6
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # # CTK: Cherokee Toolkit # # Authors: # Alvaro Lopez Ortega <alvaro@alobbs.com> # # Copyright (C) 2009 Alvaro Lopez Ortega # # This program is free software; you can redistribute it and/or # modify it under the terms of version 2 of the GNU General Public # License as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # from Box import Box from List import List from Link import Link from RawHTML import RawHTML HEADERS = [ '<script type="text/javascript" src="/CTK/js/Carousel.js"></script>' ] JS_INIT = """ $("#%(id)s").Carousel(); """ class Carousel (Box): """ Widget to render a slideshow box. 
Optional arguments: props: dictionary with properties for HTML element, such as {'class': 'test', 'id': 'snapshots'} Example: shots = CTK.Carousel() shots += CTK.Image ({'src': 'image1.png'}) shots += CTK.Image ({'src': 'image2.png'}) """ def __init__ (self, props_={}): props = props_.copy() if 'class' in props: props['class'] += " carousel" else: props['class'] = "carousel" Box.__init__ (self, props.copy()) self.images = List ({'class': 'overview'}) self.pager = List ({'class': 'pager'}) self.controls = None Box.__iadd__ (self, self.images) def __iadd__ (self, widget): link = Link (None, RawHTML ("%s" %(len(self.images.child) +1))) self.images += widget self.pager += link self.pager[-1].props['class'] = 'pagenum' return self def Render (self): # Add pager and arrows if there is more than 1 item if len(self.pager) > 1 and not self.controls: arrows = Box({'class':'arrows'}) arrows += Link (None, RawHTML("%s"%(_('left'))), {'class': "buttons prev"}) arrows += Link (None, RawHTML("%s"%(_('right'))), {'class': "buttons next"}) self.controls = Box({'class':'controls'}) self.controls += arrows self.controls += self.pager Box.__iadd__ (self, self.controls) # Render render = Box.Render (self) render.headers += HEADERS render.js += JS_INIT %({'id': self.id}) return render class CarouselThumbnails (Carousel): """ Widget to render a slideshow box, with thumbnails. 
Optional arguments: props: dictionary with properties for HTML element, such as {'class': 'test', 'id': 'snapshots'} Example: shots = CTK.CarouselThumbnails() shots += CTK.Image ({'src': 'image1.png'}) shots += CTK.Image ({'src': 'image2.png'}) """ def __init__ (self, props_={}): Carousel.__init__ (self, props_.copy()) def __iadd__ (self, widget): box = Box ({'class': 'carousel-thumbs'}) box += RawHTML ("%s" %(len(self.images.child) +1)) box += Box ({'class': 'carousel_thumbs-image'}, widget) link = Link (None, Box ({'class': 'carousel_thumbs-link'}, box)) self.images += widget self.pager += link self.pager[-1].props['class'] = 'pagenum' return self
30.081301
88
0.582973
ace81754dadcd845f077f6ec4337a415c502c290
602
py
Python
src/niveristand/_translation/py2rtseq/exp_transformer.py
ioancornea/niveristand-python
a7fd578aefa904e9eb0bab00762af0ebba21ada0
[ "MIT" ]
6
2018-07-04T10:59:43.000Z
2022-03-24T13:34:33.000Z
src/niveristand/_translation/py2rtseq/exp_transformer.py
ioancornea/niveristand-python
a7fd578aefa904e9eb0bab00762af0ebba21ada0
[ "MIT" ]
14
2018-11-05T20:05:33.000Z
2022-03-10T12:54:58.000Z
src/niveristand/_translation/py2rtseq/exp_transformer.py
ioancornea/niveristand-python
a7fd578aefa904e9eb0bab00762af0ebba21ada0
[ "MIT" ]
15
2018-07-04T07:58:49.000Z
2022-02-22T16:35:26.000Z
from niveristand._translation import utils
from niveristand._translation.py2rtseq import validations
from niveristand.clientapi import realtimesequencedefinition as rtseqapi


def exp_transformer(node, resources):
    """Translate an expression-statement AST node into an RT sequence expression.

    Nodes that look like documentation blocks are skipped (yielding an empty
    string); otherwise the node's value is transformed.  A truthy result is
    appended to the current block, and the result is returned either way.
    """
    looks_like_doc = validations.check_if_looks_like_doc_block(node)
    if looks_like_doc:
        expression = ""
    else:
        expression = utils.generic_ast_node_transform(node.value, resources)
    # Custom actions generate their own expressions and return None,
    # so only add an expression if something was returned.
    if expression:
        rtseqapi.add_expression(resources.get_current_block(), expression)
    return expression
37.625
72
0.759136
ace8176514ce3f9f9e2848bcffec16588b9337c4
12,444
py
Python
cakechat/dialog_model/keras_model.py
tyfecho/cakechat
98945f558759eee14f34d99f4c8a3ddbd0f5b292
[ "Apache-2.0" ]
null
null
null
cakechat/dialog_model/keras_model.py
tyfecho/cakechat
98945f558759eee14f34d99f4c8a3ddbd0f5b292
[ "Apache-2.0" ]
null
null
null
cakechat/dialog_model/keras_model.py
tyfecho/cakechat
98945f558759eee14f34d99f4c8a3ddbd0f5b292
[ "Apache-2.0" ]
null
null
null
import abc import os import time import tensorflow as tf from datetime import timedelta import tensorflow.keras.backend as K from cakechat.dialog_model.abstract_callbacks import AbstractKerasModelCallback, ParametrizedCallback, \ _KerasCallbackAdapter from cakechat.dialog_model.abstract_model import AbstractModel from cakechat.dialog_model.quality.metrics.plotters import DummyMetricsPlotter from cakechat.utils.env import is_main_horovod_worker, set_horovod_worker_random_seed from cakechat.utils.files_utils import is_non_empty_file from cakechat.utils.logger import WithLogger class KerasTFModelIsolator(object): def __init__(self): # Use global keras (tensorflow) session config here keras_session_config = K.get_session()._config self._keras_isolated_graph = tf.compat.v1.Graph() self._keras_isolated_session = tf.Session(graph=self._keras_isolated_graph, config=keras_session_config) def _isolate_func(self, func): def wrapper(*args, **kwargs): with self._keras_isolated_graph.as_default(): with self._keras_isolated_session.as_default(): return func(*args, **kwargs) return wrapper class EvaluateAndSaveBestIntermediateModelCallback(AbstractKerasModelCallback, WithLogger): def __init__(self, model, eval_state_per_batches): """ :param model: AbstractKerasModel object :param eval_state_per_batches: run model evaluation each `eval_state_per_batches` steps """ super(EvaluateAndSaveBestIntermediateModelCallback, self).__init__(model) WithLogger.__init__(self) self._eval_state_per_batches = eval_state_per_batches self._training_start_time = None self._cur_epoch_start_time = None @property def callback_params(self): return {'eval_state_per_batches': self._eval_state_per_batches} @property def runs_only_on_main_worker(self): return True @staticmethod def _get_formatted_time(seconds): return str(timedelta(seconds=int(seconds))) def _log_metrics(self, dataset_name_to_metrics): for dataset_name, metrics in dataset_name_to_metrics.items(): for metric_name, metric_value in 
metrics.items(): self._logger.info('{} {} = {}'.format(dataset_name, metric_name, metric_value)) self._model.metrics_plotter.plot(self._model.model_id, '{}/{}'.format(dataset_name, metric_name), metric_value) def _eval_and_save_current_model(self, batch_num=None): total_elapsed_time = time.time() - self._training_start_time self._logger.info('Total elapsed time: {}'.format(self._get_formatted_time(total_elapsed_time))) if batch_num: elapsed_time_per_batch = (time.time() - self._cur_epoch_start_time) / batch_num self._logger.info('Cur batch num: {}; Train time per batch: {:.2f} seconds'.format( batch_num, elapsed_time_per_batch)) dataset_name_to_metrics = self._model._evaluate() self._log_metrics(dataset_name_to_metrics) if not os.path.exists(self._model.model_path): os.makedirs(self._model.model_path) if self._model.metrics is None or self._model._is_better_model(dataset_name_to_metrics, self._model.metrics): self._logger.info('Obtained new best model. Saving it to {}'.format(self._model._model_resource_path)) self._model._save_model(self._model._model_resource_path) self._model._metrics_serializer.save_metrics(self._model._metrics_resource_path, dataset_name_to_metrics) self._model._metrics = dataset_name_to_metrics self._model._save_model(self._model._model_progress_resource_path) def on_train_begin(self, logs=None): self._logger.info('Start training') self._training_start_time = time.time() def on_train_end(self, logs=None): self._logger.info('Stop training and compute final model metrics') self._eval_and_save_current_model() def on_batch_end(self, batch_num, logs=None): if batch_num > 0 and batch_num % self._eval_state_per_batches == 0: self._eval_and_save_current_model(batch_num) def on_epoch_begin(self, epoch_num, logs=None): cur_epoch_num = epoch_num + 1 self._logger.info('Starting epoch {}'.format(cur_epoch_num)) self._cur_epoch_start_time = time.time() def on_epoch_end(self, epoch_num, logs=None): cur_epoch_num = epoch_num + 1 cur_epoch_time = time.time() 
- self._cur_epoch_start_time self._logger.info('For epoch {} elapsed time: {}'.format(cur_epoch_num, self._get_formatted_time(cur_epoch_time))) class AbstractKerasModel(AbstractModel, metaclass=abc.ABCMeta): # Model resources default values _MODEL_PROGRESS_RESOURCE_NAME = 'model.current' def __init__(self, metrics_plotter=None, horovod=None, training_callbacks=None, *args, **kwargs): """ :param metrics_plotter: object that plots training and validation metrics (see `TensorboardMetricsPlotter`) :param horovod: horovod module initialized for training on multiple GPUs. If None, uses single GPU, or CPU :param training_callbacks: list of instances of `AbstractKerasModelCallback`/`ParametrizedCallback` or None. In subclasses, please call `_create_essential_callbacks` to get essential callbacks, and/or put your own ones in this argument. """ super(AbstractKerasModel, self).__init__(*args, **kwargs) self._metrics_plotter = metrics_plotter if metrics_plotter else DummyMetricsPlotter() self._horovod = horovod self._class_weight = None self._callbacks = training_callbacks or [] @staticmethod def _create_essential_callbacks(model, horovod=None, eval_state_per_batches=None): """ :param model: a model object, typically `self` :param horovod: if not None, adds callback for model params broadcasting between workers :param eval_state_per_batches: if not None, adds callback to evaluate the model every `eval_state_per_batches` batches :return: a list of callbacks """ callbacks = [] if horovod: callbacks.append( ParametrizedCallback( horovod.callbacks.BroadcastGlobalVariablesCallback(0), runs_only_on_main_worker=False)) if eval_state_per_batches: callbacks.append(EvaluateAndSaveBestIntermediateModelCallback(model, eval_state_per_batches)) return callbacks def _get_worker_callbacks(self): if is_main_horovod_worker(self._horovod): # all callbacks should be run on main worker return self._callbacks # but not all callbacks should be run on a not main worker return [callback for callback 
in self._callbacks if not callback.runs_only_on_main_worker] @staticmethod def _to_keras_callbacks(callbacks): """ Casts AbstractKerasModel callbacks (see `cakechat.dialog_model.callbacks`) to the keras-based ones (instances of `keras.callbacks.Callback`) :param callbacks: :return: """ keras_callbacks = [] for custom_callback in callbacks: if isinstance(custom_callback, AbstractKerasModelCallback): keras_callback = _KerasCallbackAdapter(custom_callback) elif isinstance(custom_callback, ParametrizedCallback): keras_callback = custom_callback.callback else: raise ValueError('Unsupported callback type: {}'.format(type(custom_callback))) keras_callbacks.append(keras_callback) return keras_callbacks def _set_class_weight(self, class_weight): self._class_weight = class_weight @property @abc.abstractmethod def _model_params(self): pass @property def model_params(self): params = { 'training_callbacks': { cb.__class__.__name__: cb.callback_params for cb in self._callbacks if cb.callback_params } } params.update(self._model_params) return params @property def _model_progress_resource_path(self): return os.path.join(self.model_path, self._MODEL_PROGRESS_RESOURCE_NAME) @property def model(self): self.init_model() return self._model @property def metrics_plotter(self): return self._metrics_plotter @abc.abstractmethod def _get_training_model(self): pass @abc.abstractmethod def _build_model(self): pass @abc.abstractmethod def _is_better_model(self, new_metrics, old_metrics): pass @abc.abstractmethod def _get_training_batch_generator(self): """ :return: generator with (inputs, targets) or (inputs, targets, sample_weights) tuples. The generator is expected to loop over its data indefinitely. An epoch finishes when epoch_batches_num batches have been seen by the training worker. 
""" pass @abc.abstractmethod def _get_epoch_batches_num(self): pass def _save_model(self, model_file_path): self._model.save(model_file_path, overwrite=True) self._logger.info('Saved model weights to {}'.format(model_file_path)) def _load_model(self, fresh_model, model_file_path): fresh_model.load_weights(model_file_path, by_name=True) self._logger.info('Restored model weights from {}'.format(model_file_path)) return fresh_model def _load_model_if_exists(self): if is_non_empty_file(self._model_progress_resource_path): self._model = self._load_model(self._model, self._model_progress_resource_path) self._metrics = self._metrics_serializer.load_metrics(self._metrics_resource_path) return self._logger.info('Could not find saved model at {}\nModel will be trained from scratch.\n' .format(self._model_progress_resource_path)) def print_weights_summary(self): summary = '\n\nModel weights summary:' summary += '\n\t{0:<80} {1:<20} {2:}\n'.format('layer name', 'output shape:', 'size:') weights_names = [weight.name for layer in self._model.layers for weight in layer.weights] weights = self._model.get_weights() total_network_size = 0 for name, weight in zip(weights_names, weights): param_size = weight.nbytes / 1024 / 1024 summary += '\n\t{0:<80} {1:20} {2:<.2f}Mb'.format(name, str(weight.shape), param_size) total_network_size += param_size summary += '\n\nTotal network size: {0:.1f} Mb\n'.format(total_network_size) self._logger.info(summary) def init_model(self): if not self._model: self._logger.info('Initializing NN model') self._model = self._build_model() self._logger.info('NN model is initialized\n') self.print_weights_summary() def train_model(self): self.init_model() self._load_model_if_exists() set_horovod_worker_random_seed(self._horovod) training_batch_generator = self._get_training_batch_generator() epoch_batches_num = self._get_epoch_batches_num() workers_num = self._horovod.size() if self._horovod else 1 self._logger.info('Total epochs num = {}; Total batches per 
epochs = {}; Total workers for train = {}'.format( self.model_params['epochs_num'], epoch_batches_num, workers_num)) worker_callbacks = self._get_worker_callbacks() training_model = self._get_training_model() training_model.fit_generator( training_batch_generator, steps_per_epoch=epoch_batches_num // workers_num, callbacks=self._to_keras_callbacks(worker_callbacks), epochs=self.model_params['epochs_num'], class_weight=self._class_weight, verbose=0, workers=0) # reload model with the best quality if is_main_horovod_worker(self._horovod): self._model = self._load_model(self._model, self._model_resource_path)
40.271845
120
0.684908
ace8178ce3db58dc7bb1daaad240d980a255292d
462
py
Python
ontask/table/services/__init__.py
pinheiroo27/ontask_b
23fee8caf4e1c5694a710a77f3004ca5d9effeac
[ "MIT" ]
33
2017-12-02T04:09:24.000Z
2021-11-07T08:41:57.000Z
ontask/table/services/__init__.py
pinheiroo27/ontask_b
23fee8caf4e1c5694a710a77f3004ca5d9effeac
[ "MIT" ]
189
2017-11-16T04:06:29.000Z
2022-03-11T23:35:59.000Z
ontask/table/services/__init__.py
pinheiroo27/ontask_b
23fee8caf4e1c5694a710a77f3004ca5d9effeac
[ "MIT" ]
30
2017-11-30T03:35:44.000Z
2022-01-31T03:08:08.000Z
from ontask.table.services.display import ( perform_row_delete, render_table_display_page, render_table_display_server_side, ) from ontask.table.services.download import create_response_with_csv from ontask.table.services.errors import OnTaskTableNoKeyValueError from ontask.table.services.stats import ( get_column_visualization_items, get_table_visualization_items, ) from ontask.table.services.view import ViewTable, do_clone_view, save_view_form
42
79
0.854978
ace8186c8272fd01b3dde46fc26ba9d1c108600a
527
py
Python
photos/models.py
Deepanjalli/job_portal6
2869de5dca16a88f840ce0e4a26fe2edba3e9cae
[ "MIT" ]
null
null
null
photos/models.py
Deepanjalli/job_portal6
2869de5dca16a88f840ce0e4a26fe2edba3e9cae
[ "MIT" ]
4
2020-06-06T01:42:22.000Z
2021-09-08T01:50:57.000Z
photos/models.py
Deepanjalli/job_portal6
2869de5dca16a88f840ce0e4a26fe2edba3e9cae
[ "MIT" ]
null
null
null
from django.db import models from django.urls import reverse class Photo(models.Model): title = models.CharField(max_length=144) image = models.ImageField( upload_to='%Y/%m/%d/', height_field='image_height', width_field='image_width' ) image_height = models.FloatField() image_width = models.FloatField() timestamp = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) def __str__(self): return str(self.title) def get_absolute_url(self): return reverse("photos:detail")
25.095238
52
0.760911
ace8193b736c397c7850962fc96fc50cac6586fd
14,889
py
Python
utils.py
ACampero/Introspection_CoRL
e53ed6bbc3f0cc19802db7becc2c91b4e319ce82
[ "MIT" ]
null
null
null
utils.py
ACampero/Introspection_CoRL
e53ed6bbc3f0cc19802db7becc2c91b4e319ce82
[ "MIT" ]
null
null
null
utils.py
ACampero/Introspection_CoRL
e53ed6bbc3f0cc19802db7becc2c91b4e319ce82
[ "MIT" ]
null
null
null
import sys import os import time import math import torch import numpy as np from PIL import Image, ImageDraw, ImageFont from torch.autograd import Variable import itertools import struct # get_image_size import imghdr # get_image_size import pdb def sigmoid(x): return 1.0/(math.exp(-x)+1.) def softmax(x): x = torch.exp(x - torch.max(x)) x = x/x.sum() return x def bbox_iou(box1, box2, x1y1x2y2=True): if x1y1x2y2: mx = min(box1[0], box2[0]) Mx = max(box1[2], box2[2]) my = min(box1[1], box2[1]) My = max(box1[3], box2[3]) w1 = box1[2] - box1[0] h1 = box1[3] - box1[1] w2 = box2[2] - box2[0] h2 = box2[3] - box2[1] else: mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0) Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0) my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0) My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0) w1 = box1[2] h1 = box1[3] w2 = box2[2] h2 = box2[3] uw = Mx - mx uh = My - my cw = w1 + w2 - uw ch = h1 + h2 - uh carea = 0 if cw <= 0 or ch <= 0: return 0.0 area1 = w1 * h1 area2 = w2 * h2 carea = cw * ch uarea = area1 + area2 - carea return carea/uarea def bbox_ious(boxes1, boxes2, x1y1x2y2=True): if x1y1x2y2: mx = torch.min(boxes1[0], boxes2[0]) Mx = torch.max(boxes1[2], boxes2[2]) my = torch.min(boxes1[1], boxes2[1]) My = torch.max(boxes1[3], boxes2[3]) w1 = boxes1[2] - boxes1[0] h1 = boxes1[3] - boxes1[1] w2 = boxes2[2] - boxes2[0] h2 = boxes2[3] - boxes2[1] else: mx = torch.min(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0) Mx = torch.max(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0) my = torch.min(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0) My = torch.max(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0) w1 = boxes1[2] h1 = boxes1[3] w2 = boxes2[2] h2 = boxes2[3] uw = Mx - mx uh = My - my cw = w1 + w2 - uw ch = h1 + h2 - uh mask = ((cw <= 0) + (ch <= 0) > 0) area1 = w1 * h1 area2 = w2 * h2 carea = cw * ch carea[mask] = 0 uarea = area1 + area2 - carea return carea/uarea def nms(boxes, nms_thresh): if len(boxes) == 0: return boxes 
det_confs = torch.zeros(len(boxes)) for i in range(len(boxes)): det_confs[i] = 1-boxes[i][4] _,sortIds = torch.sort(det_confs) out_boxes = [] for i in range(len(boxes)): box_i = boxes[sortIds[i]] if box_i[4] > 0: out_boxes.append(box_i) for j in range(i+1, len(boxes)): box_j = boxes[sortIds[j]] if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh: #print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False)) box_j[4] = 0 return out_boxes def convert2cpu(gpu_matrix): return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix) def convert2cpu_long(gpu_matrix): return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix) def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False): anchor_step = len(anchors)/num_anchors if output.dim() == 3: output = output.unsqueeze(0) batch = output.size(0) assert(output.size(1) == (5+num_classes)*num_anchors) h = output.size(2) w = output.size(3) t0 = time.time() all_boxes = [] output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w) grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda() grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda() xs = torch.sigmoid(output[0]) + grid_x ys = torch.sigmoid(output[1]) + grid_y anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0])) anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1])) anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda() anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda() ws = torch.exp(output[2]) * anchor_w hs = torch.exp(output[3]) * anchor_h det_confs = torch.sigmoid(output[4]) cls_confs = 
torch.nn.Softmax()(Variable(output[5:5+num_classes].transpose(0,1))).data cls_max_confs, cls_max_ids = torch.max(cls_confs, 1) cls_max_confs = cls_max_confs.view(-1) cls_max_ids = cls_max_ids.view(-1) t1 = time.time() sz_hw = h*w sz_hwa = sz_hw*num_anchors det_confs = convert2cpu(det_confs) cls_max_confs = convert2cpu(cls_max_confs) cls_max_ids = convert2cpu_long(cls_max_ids) xs = convert2cpu(xs) ys = convert2cpu(ys) ws = convert2cpu(ws) hs = convert2cpu(hs) if validation: cls_confs = convert2cpu(cls_confs.view(-1, num_classes)) t2 = time.time() for b in range(batch): boxes = [] for cy in range(h): for cx in range(w): for i in range(num_anchors): ind = b*sz_hwa + i*sz_hw + cy*w + cx det_conf = det_confs[ind] if only_objectness: conf = det_confs[ind] else: conf = det_confs[ind] * cls_max_confs[ind] if conf > conf_thresh: bcx = xs[ind] bcy = ys[ind] bw = ws[ind] bh = hs[ind] cls_max_conf = cls_max_confs[ind] cls_max_id = cls_max_ids[ind] box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id, cy, cx, h/13] if (not only_objectness) and validation: for c in range(num_classes): tmp_conf = cls_confs[ind][c] if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh: box.append(tmp_conf) box.append(c) boxes.append(box) all_boxes.append(boxes) t3 = time.time() if False: print('---------------------------------') print('matrix computation : %f' % (t1-t0)) print(' gpu to cpu : %f' % (t2-t1)) print(' boxes filter : %f' % (t3-t2)) print('---------------------------------') return all_boxes def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None): import cv2 colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]); def get_color(c, x, max_val): ratio = float(x)/max_val * 5 i = int(math.floor(ratio)) j = int(math.ceil(ratio)) ratio = ratio - i r = (1-ratio) * colors[i][c] + ratio*colors[j][c] return int(r*255) width = img.shape[1] height = img.shape[0] for i in range(len(boxes)): box = boxes[i] x1 = int(round((box[0] - 
box[2]/2.0) * width)) y1 = int(round((box[1] - box[3]/2.0) * height)) x2 = int(round((box[0] + box[2]/2.0) * width)) y2 = int(round((box[1] + box[3]/2.0) * height)) if color: rgb = color else: rgb = (255, 0, 0) if len(box) >= 7 and class_names: cls_conf = box[5] cls_id = box[6] print('%s: %f' % (class_names[cls_id], cls_conf)) classes = len(class_names) offset = cls_id * 123457 % classes red = get_color(2, offset, classes) green = get_color(1, offset, classes) blue = get_color(0, offset, classes) if color is None: rgb = (red, green, blue) img = cv2.putText(img, class_names[cls_id], (x1,y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1) img = cv2.rectangle(img, (x1,y1), (x2,y2), rgb, 1) if savename: print("save plot results to %s" % savename) cv2.imwrite(savename, img) return img def plot_boxes(img, boxes, savename=None, class_names=None): colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]); def get_color(c, x, max_val): ratio = float(x)/max_val * 5 i = int(math.floor(ratio)) j = int(math.ceil(ratio)) ratio = ratio - i r = (1-ratio) * colors[i][c] + ratio*colors[j][c] return int(r*255) width = img.width height = img.height draw = ImageDraw.Draw(img) for i in range(len(boxes)): box = boxes[i] x1 = (box[0] - box[2]/2.0) * width y1 = (box[1] - box[3]/2.0) * height x2 = (box[0] + box[2]/2.0) * width y2 = (box[1] + box[3]/2.0) * height rgb = (255, 0, 0) if len(box) >= 7 and class_names: cls_conf = box[5] cls_id = box[6] print('%s: %f' % (class_names[cls_id], cls_conf)) classes = len(class_names) offset = cls_id * 123457 % classes red = get_color(2, offset, classes) green = get_color(1, offset, classes) blue = get_color(0, offset, classes) rgb = (red, green, blue) draw.text((x1, y1), class_names[cls_id], fill=rgb) draw.rectangle([x1, y1, x2, y2], outline = rgb) if savename: print("save plot results to %s" % savename) img.save(savename) return img def read_truths(lab_path): if not os.path.exists(lab_path): return np.array([]) if 
os.path.getsize(lab_path): truths = np.loadtxt(lab_path) truths = truths.reshape(truths.size/5, 5) # to avoid single truth problem return truths else: return np.array([]) def read_truths_args(lab_path, min_box_scale): truths = read_truths(lab_path) new_truths = [] for i in range(truths.shape[0]): if truths[i][3] < min_box_scale: continue new_truths.append([truths[i][0], truths[i][1], truths[i][2], truths[i][3], truths[i][4]]) return np.array(new_truths) def load_class_names(namesfile): class_names = [] with open(namesfile, 'r') as fp: lines = fp.readlines() for line in lines: line = line.rstrip() class_names.append(line) return class_names def image2torch(img): width = img.width height = img.height img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes())) img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous() img = img.view(1, 3, height, width) img = img.float().div(255.0) return img def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1): model.eval() t0 = time.time() if isinstance(img, Image.Image): width = img.width height = img.height img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes())) img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous() img = img.view(1, 3, height, width) img = img.float().div(255.0) elif type(img) == np.ndarray: # cv2 image img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0) else: print("unknow image type") exit(-1) t1 = time.time() if use_cuda: img = img.cuda() img = torch.autograd.Variable(img) t2 = time.time() list_boxes, convrep = model(img) boxes = list_boxes[0][0] + list_boxes[1][0] + list_boxes[2][0] t3 = time.time() boxes = nms(boxes, nms_thresh) t4 = time.time() if False: print('-----------------------------------') print(' image to tensor : %f' % (t1 - t0)) print(' tensor to cuda : %f' % (t2 - t1)) print(' predict : %f' % (t3 - t2)) print(' nms : %f' % (t4 - t3)) print(' total : %f' % (t4 - t0)) 
print('-----------------------------------') return boxes, convrep def read_data_cfg(datacfg): options = dict() options['gpus'] = '0,1,2,3' options['num_workers'] = '10' with open(datacfg, 'r') as fp: lines = fp.readlines() for line in lines: line = line.strip() if line == '': continue key,value = line.split('=') key = key.strip() value = value.strip() options[key] = value return options def scale_bboxes(bboxes, width, height): import copy dets = copy.deepcopy(bboxes) for i in range(len(dets)): dets[i][0] = dets[i][0] * width dets[i][1] = dets[i][1] * height dets[i][2] = dets[i][2] * width dets[i][3] = dets[i][3] * height return dets def file_lines(thefilepath): count = 0 thefile = open(thefilepath, 'rb') while True: buffer = thefile.read(8192*1024) if not buffer: break count += buffer.count('\n') thefile.close( ) return count def get_image_size(fname): '''Determine the image type of fhandle and return its size. from draco''' with open(fname, 'rb') as fhandle: head = fhandle.read(24) if len(head) != 24: return if imghdr.what(fname) == 'png': check = struct.unpack('>i', head[4:8])[0] if check != 0x0d0a1a0a: return width, height = struct.unpack('>ii', head[16:24]) elif imghdr.what(fname) == 'gif': width, height = struct.unpack('<HH', head[6:10]) elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg': try: fhandle.seek(0) # Read 0xff next size = 2 ftype = 0 while not 0xc0 <= ftype <= 0xcf: fhandle.seek(size, 1) byte = fhandle.read(1) while ord(byte) == 0xff: byte = fhandle.read(1) ftype = ord(byte) size = struct.unpack('>H', fhandle.read(2))[0] - 2 # We are at a SOFn block fhandle.seek(1, 1) # Skip `precision' byte. height, width = struct.unpack('>HH', fhandle.read(4)) except Exception: #IGNORE:W0703 return else: return return width, height def logging(message): print('%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), message))
34.545244
138
0.536302
ace8194d81bab2b18979f8ac23d804c51470bc99
3,654
py
Python
{{ cookiecutter.project_name }}/tests/test_handler.py
depersgroep/cookiecutter-aws-sam
87fda6ff183e60bc04056664196422943a44c304
[ "MIT-0" ]
2
2018-09-28T16:51:20.000Z
2020-04-09T17:04:24.000Z
{{ cookiecutter.project_name }}/tests/test_handler.py
depersgroep/cookiecutter-aws-sam
87fda6ff183e60bc04056664196422943a44c304
[ "MIT-0" ]
null
null
null
{{ cookiecutter.project_name }}/tests/test_handler.py
depersgroep/cookiecutter-aws-sam
87fda6ff183e60bc04056664196422943a44c304
[ "MIT-0" ]
1
2019-03-13T13:31:06.000Z
2019-03-13T13:31:06.000Z
import json import unittest from unittest import mock from my_module import lambda_one from my_module import lambda_two class TestCheckValidateCerts(unittest.TestCase): def apigw_event(self): """ Generates API GW Event""" return { "body": "{ \"test\": \"body\"}", "resource": "/{proxy+}", "requestContext": { "resourceId": "123456", "apiId": "1234567890", "resourcePath": "/{proxy+}", "httpMethod": "POST", "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef", "accountId": "123456789012", "identity": { "apiKey": "", "userArn": "", "cognitoAuthenticationType": "", "caller": "", "userAgent": "Custom User Agent String", "user": "", "cognitoIdentityPoolId": "", "cognitoIdentityId": "", "cognitoAuthenticationProvider": "", "sourceIp": "127.0.0.1", "accountId": "" }, "stage": "prod" }, "queryStringParameters": { "foo": "bar" }, "headers": { "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)", "Accept-Language": "en-US,en;q=0.8", "CloudFront-Is-Desktop-Viewer": "true", "CloudFront-Is-SmartTV-Viewer": "false", "CloudFront-Is-Mobile-Viewer": "false", "X-Forwarded-For": "127.0.0.1, 127.0.0.2", "CloudFront-Viewer-Country": "US", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Upgrade-Insecure-Requests": "1", "X-Forwarded-Port": "443", "Host": "1234567890.execute-api.us-east-1.amazonaws.com", "X-Forwarded-Proto": "https", "X-Amz-Cf-Id": "aaaaaaaaaae3VYQb9jd-nvCd-de396Uhbp027Y2JvkCPNLmGJHqlaA==", "CloudFront-Is-Tablet-Viewer": "false", "Cache-Control": "max-age=0", "User-Agent": "Custom User Agent String", "CloudFront-Forwarded-Proto": "https", "Accept-Encoding": "gzip, deflate, sdch" }, "pathParameters": { "proxy": "/examplepath" }, "httpMethod": "POST", "stageVariables": { "baz": "qux" }, "path": "/examplepath" } @mock.patch('aws_xray_sdk.core.xray_recorder.current_subsegment') @mock.patch('aws_xray_sdk.core.xray_recorder.begin_subsegment') def test_lambda_one__handler(self, mock_begin_segment, mock_current_segment): 
response = lambda_one.lambda_handler(self.apigw_event(), "") assert response['statusCode'] == 200 body = json.loads(response['body']) assert 'Id' in body assert 'Count' in body def test_lambda_two__handler(self): response = lambda_two.lambda_handler(self.apigw_event(), "") assert response['statusCode'] == 200 body = json.loads(response['body']) assert body['status'] == 'ok'
35.134615
93
0.462781
ace8194f61214da5c7992e53127966cf4103c98a
1,714
py
Python
test/test_result.py
IanLee1521/assemblyline_client
ed2394710b5dcd5b99b21d7352a8c5dff49b6463
[ "MIT" ]
21
2020-04-27T02:48:15.000Z
2022-02-09T00:15:52.000Z
test/test_result.py
IanLee1521/assemblyline_client
ed2394710b5dcd5b99b21d7352a8c5dff49b6463
[ "MIT" ]
5
2020-05-11T12:44:49.000Z
2022-03-23T11:43:38.000Z
test/test_result.py
IanLee1521/assemblyline_client
ed2394710b5dcd5b99b21d7352a8c5dff49b6463
[ "MIT" ]
8
2020-11-09T12:24:40.000Z
2021-09-30T15:57:10.000Z
try: from utils import random_id_from_collection except ImportError: import pytest import sys if sys.version_info < (3, 0): pytestmark = pytest.mark.skip else: raise def test_get_result(datastore, client): result_id = random_id_from_collection(datastore, 'result') res = client.result(result_id) ds_res = datastore.result.get(result_id, as_obj=False) assert res['response'] == ds_res['response'] assert res['sha256'] == ds_res['sha256'] assert res['sha256'] == result_id[:64] def test_get_result_error(datastore, client): error_id = random_id_from_collection(datastore, 'error') res = client.result.error(error_id) assert res == datastore.error.get(error_id, as_obj=False) def test_get_multiple_error(datastore, client): m_results = [] m_errors = [] for _ in range(5): m_results.append(random_id_from_collection(datastore, 'result')) for _ in range(2): m_errors.append(random_id_from_collection(datastore, 'error')) m_results = list(set(m_results)) m_errors = list(set(m_errors)) res = client.result.multiple(error=m_errors, result=m_results) assert sorted(list(res['error'].keys())) == sorted(m_errors) assert sorted(list(res['result'].keys())) == sorted(m_results) ds_result = datastore.get_multiple_results(m_results) for res_key in m_results: assert res['result'][res_key]['response'] == ds_result[res_key]['response'] assert res['result'][res_key]['sha256'] == ds_result[res_key]['sha256'] assert res['result'][res_key]['sha256'] == res_key[:64] assert res['error'] == datastore.error.multiget(m_errors, as_dictionary=True, as_obj=False)
34.28
95
0.689615
ace81a2d2305eff5bf7aa0bf3019c7f1dd71a6a7
1,859
py
Python
virustotal.py
Micr067/security-tools
076d230219a7c065a95b16ca76428af43898490c
[ "MIT" ]
null
null
null
virustotal.py
Micr067/security-tools
076d230219a7c065a95b16ca76428af43898490c
[ "MIT" ]
null
null
null
virustotal.py
Micr067/security-tools
076d230219a7c065a95b16ca76428af43898490c
[ "MIT" ]
null
null
null
#!/usr/bin/env python from netaddr import * import ctfpwn import json import time import os import argparse virus_total_api_key = os.environ['VIRUS_TOTAL_API_KEY'] def process(cidr, logfile='virustotal.log'): total_found_domains = 0 ips = IPSet([cidr]) for ip in ips: print("\n[+] Resolving IP: {}".format(ip)) found_domains = 0 url = 'https://www.virustotal.com/vtapi/v2/ip-address/report?apikey={}&ip={}'.format( virus_total_api_key, str(ip)) resp = ctfpwn.http_get(url) if resp: domains = json.loads(resp) if (domains['response_code'] == 0): print "[-] Empty response for {}".format(str(ip)) time.sleep(15) continue f = open(logfile, 'a') for d in domains['resolutions']: print('>>> {}'.format(d['hostname'])) found_domains = found_domains + 1 f.write("{}\n".format(d['hostname'])) print "[+] Found {} domain(s) on {}".format(found_domains, str(ip)) print("[+] Waiting 15 sec. until next request (VirusTotal API restriction)") total_found_domains = total_found_domains + found_domains f.close() else: print "[-] Empty response for {}".format(str(ip)) time.sleep(15) print("\n\n[+] Done, found {} in total".format(total_found_domains)) def main(): parser = argparse.ArgumentParser() logfile = '' parser.add_argument( "-c", "--cidr", help="Network CIDR") parser.add_argument( "-o", "--output", help="Log filename (default - virustotal.log)") args = parser.parse_args() if args.output: logfile = args.output if args.cidr: process(args.cidr, logfile) if __name__ == "__main__": main()
25.121622
93
0.566971
ace81c29d38eaa84f3cf7d5b109ba5b300ca6fd8
2,832
py
Python
2-MaksimovKA/train/losses.py
motokimura/SpaceNet_SAR_Buildings_Solutions
c208d0070124e087d4c2f8dfaade5a8c1a132498
[ "Apache-2.0" ]
60
2020-07-29T23:31:18.000Z
2022-03-20T02:02:47.000Z
2-MaksimovKA/train/losses.py
Z-Zheng/SpaceNet_SAR_Buildings_Solutions
6a9c3962d987d985384d0d41a187f5fbfadac82c
[ "Apache-2.0" ]
9
2021-01-15T08:57:15.000Z
2021-11-04T04:27:41.000Z
2-MaksimovKA/train/losses.py
Z-Zheng/SpaceNet_SAR_Buildings_Solutions
6a9c3962d987d985384d0d41a187f5fbfadac82c
[ "Apache-2.0" ]
16
2020-07-30T12:56:03.000Z
2021-08-13T16:55:05.000Z
import torch from torch import Tensor from torch.nn.modules.loss import _Loss from torch.nn import BCEWithLogitsLoss def get_loss(loss_name='cce'): if loss_name == 'focal_dice': return FocalDiceLoss() elif loss_name == 'bce': return BCEWithLogitsLoss() class FocalDiceLoss(torch.nn.Module): def __init__(self, coef_focal=1.0, coef_dice=1.0, weights=(1.0, 0.1, 0.5)): super().__init__() self.dice_loss = DiceLoss() self.focal_loss = FocalLoss() self.weights = weights self.coef_focal = coef_focal self.coef_dice = coef_dice def forward(self, outputs, targets): loss = 0.0 for i in range(outputs.shape[1]): dice = self.weights[i]*self.dice_loss(outputs[:, i, ...], targets[:, i, ...]) focal = self.weights[i]*self.focal_loss(outputs[:, i, ...], targets[:, i, ...]) loss += self.coef_dice * dice + self.coef_focal * focal return loss class DiceLoss(_Loss): def __init__(self, per_image=False): super(DiceLoss, self).__init__() self.per_image = per_image def forward(self, y_pred: Tensor, y_true: Tensor): """ :param y_pred: NxCxHxW :param y_true: NxCxHxW :return: scalar """ per_image = self.per_image y_pred = y_pred.sigmoid() batch_size = y_pred.size()[0] eps = 1e-5 if not per_image: batch_size = 1 dice_target = y_true.contiguous().view(batch_size, -1).float() dice_output = y_pred.contiguous().view(batch_size, -1) intersection = torch.sum(dice_output * dice_target, dim=1) union = torch.sum(dice_output, dim=1) + torch.sum(dice_target, dim=1) + eps loss = (1 - (2 * intersection + eps) / union).mean() return loss class FocalLoss(_Loss): def __init__(self, ignore_index=255, gamma=2): super(FocalLoss, self).__init__() self.gamma = gamma self.ignore_index = ignore_index def forward(self, y_pred: Tensor, y_true: Tensor): """ :param y_pred: NxCxHxW :param y_true: NxCxHxW :return: scalar """ y_pred = y_pred.sigmoid() gamma = self.gamma ignore_index = self.ignore_index outputs = y_pred.contiguous() targets = y_true.contiguous() eps = 1e-8 non_ignored = targets.view(-1) != ignore_index targets 
= targets.view(-1)[non_ignored].float() outputs = outputs.contiguous().view(-1)[non_ignored] outputs = torch.clamp(outputs, eps, 1. - eps) targets = torch.clamp(targets, eps, 1. - eps) pt = (1 - targets) * (1 - outputs) + targets * outputs return (-(1. - pt) ** gamma * torch.log(pt)).mean()
30.451613
91
0.586158
ace81cbd3f57246830b52f96d82b3ca6b4aa75dc
219,158
py
Python
core/controllers/acl_decorators_test.py
YBCS/oppia
f74b606e8511cd4296b3c99aad37e53b66cca196
[ "Apache-2.0" ]
null
null
null
core/controllers/acl_decorators_test.py
YBCS/oppia
f74b606e8511cd4296b3c99aad37e53b66cca196
[ "Apache-2.0" ]
4
2022-02-12T14:02:05.000Z
2022-03-27T18:08:48.000Z
core/controllers/acl_decorators_test.py
YBCS/oppia
f74b606e8511cd4296b3c99aad37e53b66cca196
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # # Copyright 2017 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for core.domain.acl_decorators.""" from __future__ import annotations import json from core import android_validation_constants from core import feconf from core.constants import constants from core.controllers import acl_decorators from core.controllers import base from core.domain import app_feedback_report_domain from core.domain import blog_services from core.domain import classifier_domain from core.domain import classifier_services from core.domain import config_services from core.domain import exp_domain from core.domain import exp_services from core.domain import feedback_services from core.domain import question_domain from core.domain import question_services from core.domain import rights_domain from core.domain import rights_manager from core.domain import skill_services from core.domain import state_domain from core.domain import story_services from core.domain import subtopic_page_domain from core.domain import subtopic_page_services from core.domain import suggestion_services from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import topic_services from core.domain import user_services from core.tests import test_utils import webapp2 import webtest class PlayExplorationDecoratorTests(test_utils.GenericTestBase): """Tests for play exploration decorator.""" user_email = 'user@example.com' username 
= 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_play_exploration def get(self, exploration_id): return self.render_json({'exploration_id': exploration_id}) def setUp(self): super(PlayExplorationDecoratorTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_play_exploration/<exploration_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) def test_can_not_access_exploration_with_disabled_exploration_ids(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_exploration/%s' % (feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404) def test_guest_can_access_published_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_play_exploration/%s' % self.published_exp_id) self.assertEqual(response['exploration_id'], self.published_exp_id) def test_guest_cannot_access_private_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_exploration/%s' % self.private_exp_id, expected_status_int=404) def test_moderator_can_access_private_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', 
self.mock_testapp): response = self.get_json( '/mock_play_exploration/%s' % self.private_exp_id) self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() def test_owner_can_access_private_exploration(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_play_exploration/%s' % self.private_exp_id) self.assertEqual(response['exploration_id'], self.private_exp_id) self.logout() def test_logged_in_user_cannot_access_not_owned_exploration(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_exploration/%s' % self.private_exp_id, expected_status_int=404) self.logout() class PlayCollectionDecoratorTests(test_utils.GenericTestBase): """Tests for play collection decorator.""" user_email = 'user@example.com' username = 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' published_col_id = 'col_id_1' private_col_id = 'col_id_2' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'collection_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_play_collection def get(self, collection_id): return self.render_json({'collection_id': collection_id}) def setUp(self): super(PlayCollectionDecoratorTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_play_collection/<collection_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( 
self.private_exp_id, self.owner_id) self.save_new_valid_collection( self.published_col_id, self.owner_id, exploration_id=self.published_col_id) self.save_new_valid_collection( self.private_col_id, self.owner_id, exploration_id=self.private_col_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) rights_manager.publish_collection(self.owner, self.published_col_id) def test_guest_can_access_published_collection(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_play_collection/%s' % self.published_col_id) self.assertEqual(response['collection_id'], self.published_col_id) def test_guest_cannot_access_private_collection(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_collection/%s' % self.private_col_id, expected_status_int=404) def test_moderator_can_access_private_collection(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_play_collection/%s' % self.private_col_id) self.assertEqual(response['collection_id'], self.private_col_id) self.logout() def test_owner_can_access_private_collection(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_play_collection/%s' % self.private_col_id) self.assertEqual(response['collection_id'], self.private_col_id) self.logout() def test_logged_in_user_cannot_access_not_owned_private_collection(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_collection/%s' % self.private_col_id, expected_status_int=404) self.logout() def test_cannot_access_collection_with_invalid_collection_id(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_collection/invalid_collection_id', expected_status_int=404) self.logout() class EditCollectionDecoratorTests(test_utils.GenericTestBase): """Tests 
for can_edit_collection decorator.""" user_email = 'user@example.com' username = 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' published_col_id = 'col_id_1' private_col_id = 'col_id_2' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'collection_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_edit_collection def get(self, collection_id): return self.render_json({'collection_id': collection_id}) def setUp(self): super(EditCollectionDecoratorTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.set_moderators([self.MODERATOR_USERNAME]) self.set_collection_editors([self.OWNER_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_edit_collection/<collection_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) self.save_new_valid_collection( self.published_col_id, self.owner_id, exploration_id=self.published_col_id) self.save_new_valid_collection( self.private_col_id, self.owner_id, exploration_id=self.private_col_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) rights_manager.publish_collection(self.owner, self.published_col_id) def test_can_not_edit_collection_with_invalid_collection_id(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_collection/invalid_col_id', expected_status_int=404) 
self.logout() def test_guest_cannot_edit_collection_via_json_handler(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_collection/%s' % self.published_col_id, expected_status_int=401) def test_guest_is_redirected_when_using_html_handler(self): with self.swap( self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): response = self.mock_testapp.get( '/mock_edit_collection/%s' % self.published_col_id, expect_errors=True) self.assertEqual(response.status_int, 302) def test_normal_user_cannot_edit_collection(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_collection/%s' % self.private_col_id, expected_status_int=401) self.logout() def test_owner_can_edit_owned_collection(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_collection/%s' % self.private_col_id) self.assertEqual(response['collection_id'], self.private_col_id) self.logout() def test_moderator_can_edit_private_collection(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_collection/%s' % self.private_col_id) self.assertEqual(response['collection_id'], self.private_col_id) self.logout() def test_moderator_can_edit_public_collection(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_collection/%s' % self.published_col_id) self.assertEqual(response['collection_id'], self.published_col_id) self.logout() def test_admin_can_edit_any_private_collection(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_collection/%s' % self.private_col_id) self.assertEqual(response['collection_id'], self.private_col_id) self.logout() class ClassroomExistDecoratorTests(test_utils.GenericTestBase): 
"""Tests for does_classroom_exist decorator""" class MockDataHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'classroom_url_fragment': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = { 'GET': {} } @acl_decorators.does_classroom_exist def get(self, _): self.render_json({'success': True}) class MockPageHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = { 'classroom_url_fragment': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = { 'GET': {} } @acl_decorators.does_classroom_exist def get(self, _): self.render_json('oppia-root.mainpage.html') def setUp(self): super(ClassroomExistDecoratorTests, self).setUp() self.signup( self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.user_id_admin = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) config_services.set_property( self.user_id_admin, 'classroom_pages_data', [{ 'name': 'math', 'url_fragment': 'math', 'topic_ids': [], 'course_details': '', 'topic_list_intro': '' }]) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_classroom_data/<classroom_url_fragment>', self.MockDataHandler), webapp2.Route( '/mock_classroom_page/<classroom_url_fragment>', self.MockPageHandler )], debug=feconf.DEBUG )) def test_any_user_can_access_a_valid_classroom(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_classroom_data/math', expected_status_int=200) def test_redirects_user_to_default_classroom_if_given_not_available( self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_classroom_data/invalid', expected_status_int=404) def test_raises_error_if_return_type_is_not_json(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( 
'/mock_classroom_page/invalid', expected_status_int=500) class CreateExplorationDecoratorTests(test_utils.GenericTestBase): """Tests for can_create_exploration decorator.""" username = 'banneduser' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_create_exploration def get(self): self.render_json({'success': True}) def setUp(self): super(CreateExplorationDecoratorTests, self).setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.user_email, self.username) self.mark_user_banned(self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/create', self.MockHandler)], debug=feconf.DEBUG, )) def test_banned_user_cannot_create_exploration(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) self.logout() def test_normal_user_can_create_exploration(self): self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/create') self.assertEqual(response['success'], True) self.logout() def test_guest_cannot_create_exploration_via_json_handler(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) def test_guest_is_redirected_when_using_html_handler(self): with self.swap( self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): response = self.mock_testapp.get('/mock/create', expect_errors=True) self.assertEqual(response.status_int, 302) class CreateCollectionDecoratorTests(test_utils.GenericTestBase): """Tests for can_create_collection decorator.""" username = 'collectioneditor' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} 
HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_create_collection def get(self): self.render_json({'success': True}) def setUp(self): super(CreateCollectionDecoratorTests, self).setUp() self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.user_email, self.username) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_collection_editors([self.username]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/create', self.MockHandler)], debug=feconf.DEBUG, )) def test_guest_cannot_create_collection_via_json_handler(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) def test_guest_is_redirected_when_using_html_handler(self): with self.swap( self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): response = self.mock_testapp.get('/mock/create', expect_errors=True) self.assertEqual(response.status_int, 302) def test_normal_user_cannot_create_collection(self): self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/create', expected_status_int=401) self.logout() def test_collection_editor_can_create_collection(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/create') self.assertEqual(response['success'], True) self.logout() class AccessCreatorDashboardTests(test_utils.GenericTestBase): """Tests for can_access_creator_dashboard decorator.""" username = 'banneduser' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_creator_dashboard def get(self): self.render_json({'success': True}) def setUp(self): super(AccessCreatorDashboardTests, self).setUp() 
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.user_email, self.username) self.mark_user_banned(self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/access', self.MockHandler)], debug=feconf.DEBUG, )) def test_banned_user_cannot_access_editor_dashboard(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/access', expected_status_int=401) self.logout() def test_normal_user_can_access_editor_dashboard(self): self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/access') self.assertEqual(response['success'], True) class CommentOnFeedbackThreadTests(test_utils.GenericTestBase): """Tests for can_comment_on_feedback_thread decorator.""" published_exp_id = 'exp_0' private_exp_id = 'exp_1' viewer_username = 'viewer' viewer_email = 'viewer@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'thread_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_comment_on_feedback_thread def get(self, thread_id): self.render_json({'thread_id': thread_id}) def setUp(self): super(CommentOnFeedbackThreadTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.viewer_email, self.viewer_username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_comment_on_feedback_thread/<thread_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( 
self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) def test_can_not_comment_on_feedback_threads_with_disabled_exp_id(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % feconf.DISABLED_EXPLORATION_IDS[0], expected_status_int=404) self.logout() def test_viewer_cannot_comment_on_feedback_for_private_exploration(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % self.private_exp_id, expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to comment on ' 'exploration feedback.') self.logout() def test_can_not_comment_on_feedback_threads_with_invalid_thread_id(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_comment_on_feedback_thread/invalid_thread_id', expected_status_int=400) self.assertEqual(response['error'], 'Not a valid thread id.') self.logout() def test_guest_cannot_comment_on_feedback_threads_via_json_handler(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.private_exp_id), expected_status_int=401) self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.published_exp_id), expected_status_int=401) def test_guest_is_redirected_when_using_html_handler(self): with self.swap( self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE', feconf.HANDLER_TYPE_HTML): response = self.mock_testapp.get( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.private_exp_id), expect_errors=True) self.assertEqual(response.status_int, 302) response = self.mock_testapp.get( 
'/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.published_exp_id), expect_errors=True) self.assertEqual(response.status_int, 302) def test_owner_can_comment_on_feedback_for_private_exploration(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.private_exp_id)) self.logout() def test_moderator_can_comment_on_feeback_for_public_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.published_exp_id)) self.logout() def test_moderator_can_comment_on_feeback_for_private_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_comment_on_feedback_thread/exploration.%s.thread1' % (self.private_exp_id)) self.logout() class CreateFeedbackThreadTests(test_utils.GenericTestBase): """Tests for can_create_feedback_thread decorator.""" published_exp_id = 'exp_0' private_exp_id = 'exp_1' viewer_username = 'viewer' viewer_email = 'viewer@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_create_feedback_thread def get(self, exploration_id): self.render_json({'exploration_id': exploration_id}) def setUp(self): super(CreateFeedbackThreadTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.viewer_email, self.viewer_username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) 
self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_create_feedback_thread/<exploration_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) def test_can_not_create_feedback_threads_with_disabled_exp_id(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % (feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404) def test_viewer_cannot_create_feedback_for_private_exploration(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_create_feedback_thread/%s' % self.private_exp_id, expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to create ' 'exploration feedback.') self.logout() def test_guest_can_create_feedback_threads_for_public_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.published_exp_id) def test_owner_cannot_create_feedback_for_private_exploration(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.private_exp_id) self.logout() def test_moderator_can_create_feeback_for_public_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.published_exp_id) self.logout() def test_moderator_can_create_feeback_for_private_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_create_feedback_thread/%s' % self.private_exp_id) self.logout() class 
ViewFeedbackThreadTests(test_utils.GenericTestBase): """Tests for can_view_feedback_thread decorator.""" published_exp_id = 'exp_0' private_exp_id = 'exp_1' viewer_username = 'viewer' viewer_email = 'viewer@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'thread_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_view_feedback_thread def get(self, thread_id): self.render_json({'thread_id': thread_id}) def setUp(self): super(ViewFeedbackThreadTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.viewer_email, self.viewer_username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_view_feedback_thread/<thread_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) self.public_exp_thread_id = feedback_services.create_thread( feconf.ENTITY_TYPE_EXPLORATION, self.published_exp_id, self.owner_id, 'public exp', 'some text') self.private_exp_thread_id = feedback_services.create_thread( feconf.ENTITY_TYPE_EXPLORATION, self.private_exp_id, self.owner_id, 'private exp', 'some text') self.disabled_exp_thread_id = feedback_services.create_thread( feconf.ENTITY_TYPE_EXPLORATION, feconf.DISABLED_EXPLORATION_IDS[0], self.owner_id, 'disabled exp', 'some text') rights_manager.publish_exploration(self.owner, self.published_exp_id) def 
test_can_not_view_feedback_threads_with_disabled_exp_id(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.disabled_exp_thread_id, expected_status_int=404) def test_viewer_cannot_view_feedback_for_private_exploration(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_view_feedback_thread/%s' % self.private_exp_thread_id, expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to view ' 'exploration feedback.') self.logout() def test_viewer_cannot_view_feedback_threads_with_invalid_thread_id(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_view_feedback_thread/invalid_thread_id', expected_status_int=400) self.assertEqual(response['error'], 'Not a valid thread id.') self.logout() def test_viewer_can_view_non_exploration_related_feedback(self): self.login(self.viewer_email) skill_thread_id = feedback_services.create_thread( 'skill', 'skillid1', None, 'unused subject', 'unused text') with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_view_feedback_thread/%s' % skill_thread_id) def test_guest_can_view_feedback_threads_for_public_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.public_exp_thread_id) def test_owner_cannot_view_feedback_for_private_exploration(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.private_exp_thread_id) self.logout() def test_moderator_can_view_feeback_for_public_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.public_exp_thread_id) self.logout() def test_moderator_can_view_feeback_for_private_exploration(self): 
self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_view_feedback_thread/%s' % self.private_exp_thread_id) self.logout() class ManageEmailDashboardTests(test_utils.GenericTestBase): """Tests for can_manage_email_dashboard decorator.""" query_id = 'query_id' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'query_id': { 'schema': { 'type': 'basestring' }, 'default_value': None } } HANDLER_ARGS_SCHEMAS = { 'GET': {}, 'PUT': {} } @acl_decorators.can_manage_email_dashboard def get(self): return self.render_json({'success': 1}) @acl_decorators.can_manage_email_dashboard def put(self, query_id): return self.render_json({'query_id': query_id}) def setUp(self): super(ManageEmailDashboardTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.set_moderators([self.MODERATOR_USERNAME]) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [ webapp2.Route('/mock/', self.MockHandler), webapp2.Route('/mock/<query_id>', self.MockHandler) ], debug=feconf.DEBUG, )) def test_moderator_cannot_access_email_dashboard(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() def test_super_admin_can_access_email_dashboard(self): self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) with self.swap(self, 'testapp', self.mock_testapp): response = self.mock_testapp.put('/mock/%s' % self.query_id) self.assertEqual(response.status_int, 200) self.logout() class RateExplorationTests(test_utils.GenericTestBase): """Tests for can_rate_exploration decorator.""" username = 'user' user_email = 'user@example.com' exp_id = 'exp_id' class 
MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_rate_exploration def get(self, exploration_id): self.render_json({'exploration_id': exploration_id}) def setUp(self): super(RateExplorationTests, self).setUp() self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/<exploration_id>', self.MockHandler)], debug=feconf.DEBUG, )) def test_guest_cannot_give_rating(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.exp_id, expected_status_int=401) def test_normal_user_can_give_rating(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.exp_id) self.assertEqual(response['exploration_id'], self.exp_id) self.logout() class AccessModeratorPageTests(test_utils.GenericTestBase): username = 'user' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_moderator_page def get(self): return self.render_json({'success': 1}) def setUp(self): super(AccessModeratorPageTests, self).setUp() self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.set_moderators([self.MODERATOR_USERNAME]) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) def test_normal_user_cannot_access_moderator_page(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() def test_moderator_can_access_moderator_page(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 
'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) self.logout() class FlagExplorationTests(test_utils.GenericTestBase): """Tests for can_flag_exploration decorator.""" username = 'user' user_email = 'user@example.com' exp_id = 'exp_id' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_flag_exploration def get(self, exploration_id): self.render_json({'exploration_id': exploration_id}) def setUp(self): super(FlagExplorationTests, self).setUp() self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/<exploration_id>', self.MockHandler)], debug=feconf.DEBUG, )) def test_guest_cannot_flag_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.exp_id, expected_status_int=401) def test_normal_user_can_flag_exploration(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.exp_id) self.assertEqual(response['exploration_id'], self.exp_id) self.logout() class SubscriptionToUsersTests(test_utils.GenericTestBase): """Tests for can_subscribe_to_users decorator.""" username = 'user' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_subscribe_to_users def get(self): self.render_json({'success': True}) def setUp(self): super(SubscriptionToUsersTests, self).setUp() self.signup(self.user_email, self.username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) def test_guest_cannot_subscribe_to_users(self): with 
self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) def test_normal_user_can_subscribe_to_users(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], True) self.logout() class SendModeratorEmailsTests(test_utils.GenericTestBase): username = 'user' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_send_moderator_emails def get(self): return self.render_json({'success': 1}) def setUp(self): super(SendModeratorEmailsTests, self).setUp() self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.user_email, self.username) self.set_moderators([self.MODERATOR_USERNAME]) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) def test_normal_user_cannot_send_moderator_emails(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() def test_moderator_can_send_moderator_emails(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) self.logout() class CanAccessReleaseCoordinatorPageDecoratorTests(test_utils.GenericTestBase): username = 'user' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_release_coordinator_page def get(self): return self.render_json({'success': 1}) def setUp(self): super(CanAccessReleaseCoordinatorPageDecoratorTests, self).setUp() self.signup(feconf.SYSTEM_EMAIL_ADDRESS, 
self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) self.signup( self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME) self.add_user_role( self.RELEASE_COORDINATOR_USERNAME, feconf.ROLE_ID_RELEASE_COORDINATOR) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/release-coordinator', self.MockHandler)], debug=feconf.DEBUG, )) def test_normal_user_cannot_access_release_coordinator_page(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/release-coordinator', expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to access release coordinator page.') self.logout() def test_guest_user_cannot_access_release_coordinator_page(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/release-coordinator', expected_status_int=401) self.assertEqual( response['error'], 'You must be logged in to access this resource.') self.logout() def test_super_admin_cannot_access_release_coordinator_page(self): self.login(feconf.SYSTEM_EMAIL_ADDRESS) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/release-coordinator', expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to access release coordinator page.') self.logout() def test_release_coordinator_can_access_release_coordinator_page(self): self.login(self.RELEASE_COORDINATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/release-coordinator') self.assertEqual(response['success'], 1) self.logout() class CanAccessBlogAdminPageDecoratorTests(test_utils.GenericTestBase): username = 'user' user_email = 'user@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'GET': {}, } @acl_decorators.can_access_blog_admin_page def get(self): return 
self.render_json({'success': 1})

    def setUp(self):
        super(CanAccessBlogAdminPageDecoratorTests, self).setUp()
        # One plain user plus the two blog-related roles, so each access
        # level the decorator distinguishes can be exercised.
        self.signup(self.user_email, self.username)
        self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
        self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
        self.add_user_role(
            self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
        self.add_user_role(
            self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR)
        # Minimal WSGI app exposing only the decorated mock handler.
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/blog-admin', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_normal_user_cannot_access_blog_admin_page(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blog-admin', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to access blog admin page.')
        self.logout()

    def test_guest_user_cannot_access_blog_admin_page(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blog-admin', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')
        self.logout()

    def test_blog_post_editor_cannot_access_blog_admin_page(self):
        # NOTE(review): this logs in as the plain user rather than
        # BLOG_EDITOR_EMAIL, so it duplicates the normal-user test instead
        # of covering the blog-post-editor role -- confirm intent.
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blog-admin', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to access blog admin page.')
        self.logout()

    def test_blog_admin_can_access_blog_admin_page(self):
        self.login(self.BLOG_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/blog-admin')
        self.assertEqual(response['success'], 1)
        self.logout()


class CanManageBlogPostEditorsDecoratorTests(test_utils.GenericTestBase):
    """Tests for the can_manage_blog_post_editors decorator."""

    username = 'user'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        # JSON error pages so tests can assert on response['error'].
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {
            'GET': {},
        }

        @acl_decorators.can_manage_blog_post_editors
        def get(self):
            return self.render_json({'success': 1})

    def setUp(self):
        super(CanManageBlogPostEditorsDecoratorTests, self).setUp()
        self.signup(self.user_email, self.username)
        self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
        self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
        self.add_user_role(
            self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
        self.add_user_role(
            self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/blogadminrolehandler', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_normal_user_cannot_manage_blog_post_editors(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blogadminrolehandler', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to add or remove blog post editors.')
        self.logout()

    def test_guest_user_cannot_manage_blog_post_editors(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blogadminrolehandler', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')
        self.logout()

    def test_blog_post_editors_cannot_manage_blog_post_editors(self):
        # Editors can write posts but only blog admins may change who the
        # editors are.
        self.login(self.BLOG_EDITOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blogadminrolehandler', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to add or remove blog post editors.')
        self.logout()

    def test_blog_admin_can_manage_blog_editors(self):
        self.login(self.BLOG_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/blogadminrolehandler')
        self.assertEqual(response['success'], 1)
        self.logout()


class CanAccessBlogDashboardDecoratorTests(test_utils.GenericTestBase):
    """Tests for the can_access_blog_dashboard decorator."""

    username = 'user'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {
            'GET': {},
        }

        @acl_decorators.can_access_blog_dashboard
        def get(self):
            return self.render_json({'success': 1})

    def setUp(self):
        super(CanAccessBlogDashboardDecoratorTests, self).setUp()
        self.signup(self.user_email, self.username)
        self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
        self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
        self.add_user_role(
            self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
        self.add_user_role(
            self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/blog-dashboard', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_normal_user_cannot_access_blog_dashboard(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blog-dashboard', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to access blog dashboard page.')
        self.logout()

    def test_guest_user_cannot_access_blog_dashboard(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/blog-dashboard', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')
        self.logout()

    def test_blog_editors_can_access_blog_dashboard(self):
        self.login(self.BLOG_EDITOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/blog-dashboard')
        self.assertEqual(response['success'], 1)
        self.logout()

    def test_blog_admins_can_access_blog_dashboard(self):
        self.login(self.BLOG_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/blog-dashboard')
        self.assertEqual(response['success'], 1)
        self.logout()


class CanDeleteBlogPostTests(test_utils.GenericTestBase):
    """Tests for can_delete_blog_post decorator."""

    username = 'userone'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'blog_post_id': {
                'schema': {
                    'type': 'unicode'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {
            'GET': {},
        }

        @acl_decorators.can_delete_blog_post
        def get(self, blog_post_id):
            self.render_json({'blog_id': blog_post_id})

    def setUp(self):
        super(CanDeleteBlogPostTests, self).setUp()
        self.signup(self.user_email, self.username)
        self.signup(self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
        self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
        self.add_user_role(
            self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR)
        self.add_user_role(
            self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
        # The plain user is also given the editor role, so the not-owned
        # case can be tested with a user who has the role but no ownership.
        self.add_user_role(self.username, feconf.ROLE_ID_BLOG_POST_EDITOR)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_delete_blog_post/<blog_post_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.user_id = self.get_user_id_from_email(self.user_email)
        self.blog_editor_id = (
            self.get_user_id_from_email(self.BLOG_EDITOR_EMAIL))
        # One post owned by the blog editor; used by every test below.
        blog_post = blog_services.create_new_blog_post(self.blog_editor_id)
        self.blog_post_id = blog_post.id

    def test_guest_can_not_delete_blog_post(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_blog_post/%s' % self.blog_post_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')

    def test_blog_editor_can_delete_owned_blog_post(self):
        self.login(self.BLOG_EDITOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_blog_post/%s' % self.blog_post_id)
        self.assertEqual(response['blog_id'], self.blog_post_id)
        self.logout()

    def test_blog_admin_can_delete_any_blog_post(self):
        self.login(self.BLOG_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_blog_post/%s' % self.blog_post_id)
        self.assertEqual(response['blog_id'], self.blog_post_id)
        self.logout()

    def test_blog_editor_cannot_delete_not_owned_blog_post(self):
        # The plain user has the editor role (see setUp) but does not own
        # the post, so deletion is rejected.
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_blog_post/%s' % self.blog_post_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'User %s does not have permissions to delete blog post %s'
            % (self.user_id, self.blog_post_id))
        self.logout()


class CanEditBlogPostTests(test_utils.GenericTestBase):
    """Tests for can_edit_blog_post decorator."""

    username = 'userone'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'blog_post_id': {
                'schema': {
                    'type': 'unicode'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {
            'GET': {},
        }

        @acl_decorators.can_edit_blog_post
        def get(self, blog_post_id):
            self.render_json({'blog_id': blog_post_id})

    def setUp(self):
        super(CanEditBlogPostTests, self).setUp()
        self.signup(
            self.BLOG_EDITOR_EMAIL, self.BLOG_EDITOR_USERNAME)
        self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.add_user_role(
            self.BLOG_EDITOR_USERNAME, feconf.ROLE_ID_BLOG_POST_EDITOR)
        self.add_user_role(
            self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN)
        # The plain user is an editor too, to cover the role-but-not-owner
        # case below.
        self.add_user_role(self.username, feconf.ROLE_ID_BLOG_POST_EDITOR)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_edit_blog_post/<blog_post_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.blog_editor_id = (
            self.get_user_id_from_email(self.BLOG_EDITOR_EMAIL))
        self.user_id = self.get_user_id_from_email(self.user_email)
        # One post owned by the blog editor; used by every test below.
        blog_post = blog_services.create_new_blog_post(self.blog_editor_id)
        self.blog_post_id = blog_post.id

    def test_guest_can_not_edit_blog_post(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_blog_post/%s' % self.blog_post_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')

    def test_blog_editor_can_edit_owned_blog_post(self):
        self.login(self.BLOG_EDITOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_blog_post/%s' % self.blog_post_id)
        self.assertEqual(response['blog_id'], self.blog_post_id)
        self.logout()

    def test_blog_admin_can_edit_any_blog_post(self):
        self.login(self.BLOG_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_blog_post/%s' % self.blog_post_id)
        self.assertEqual(response['blog_id'], self.blog_post_id)
        self.logout()

    def test_blog_editor_cannot_edit_not_owned_blog_post(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_blog_post/%s' % self.blog_post_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'User %s does not have permissions to edit blog post %s'
            % (self.user_id, self.blog_post_id))
        self.logout()


class CanRunAnyJobDecoratorTests(test_utils.GenericTestBase):
    """Tests for the can_run_any_job decorator."""

    username = 'user'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_run_any_job
        def get(self):
            return self.render_json({'success': 1})

    def setUp(self):
        super(CanRunAnyJobDecoratorTests, self).setUp()
        self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.signup(
            self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME)
        self.add_user_role(
            self.RELEASE_COORDINATOR_USERNAME,
            feconf.ROLE_ID_RELEASE_COORDINATOR)
        # NOTE: the route spelling 'run-anny-job' is kept as-is; handler and
        # tests only need to agree on the path.
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/run-anny-job', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_normal_user_cannot_access_release_coordinator_page(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/run-anny-job', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to run jobs.')
        self.logout()

    def test_guest_user_cannot_access_release_coordinator_page(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/run-anny-job', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')
        self.logout()

    def test_super_admin_cannot_access_release_coordinator_page(self):
        # Being super admin alone does not grant the release-coordinator
        # capability.
        self.login(feconf.SYSTEM_EMAIL_ADDRESS)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/run-anny-job', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to run jobs.')
        self.logout()

    def test_release_coordinator_can_run_any_job(self):
        self.login(self.RELEASE_COORDINATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/run-anny-job')
        self.assertEqual(response['success'], 1)
        self.logout()


class CanManageMemcacheDecoratorTests(test_utils.GenericTestBase):
    """Tests for the can_manage_memcache decorator."""

    username = 'user'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_manage_memcache
        def get(self):
            return self.render_json({'success': 1})

    def setUp(self):
        super(CanManageMemcacheDecoratorTests, self).setUp()
        self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.signup(
            self.RELEASE_COORDINATOR_EMAIL, self.RELEASE_COORDINATOR_USERNAME)
        self.add_user_role(
            self.RELEASE_COORDINATOR_USERNAME,
            feconf.ROLE_ID_RELEASE_COORDINATOR)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/manage-memcache', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_normal_user_cannot_access_release_coordinator_page(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/manage-memcache', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to manage memcache.')
        self.logout()

    def test_guest_user_cannot_access_release_coordinator_page(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/manage-memcache', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')
        self.logout()

    def test_super_admin_cannot_access_release_coordinator_page(self):
        # Memcache management is tied to the release-coordinator role, not
        # to super-admin status.
        self.login(feconf.SYSTEM_EMAIL_ADDRESS)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/manage-memcache', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to manage memcache.')
        self.logout()

    def test_release_coordinator_can_run_any_job(self):
        self.login(self.RELEASE_COORDINATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/manage-memcache')
        self.assertEqual(response['success'], 1)
        self.logout()


class CanManageContributorsRoleDecoratorTests(test_utils.GenericTestBase):
    """Tests for the can_manage_contributors_role decorator."""

    username = 'user'
    user_email = 'user@example.com'
    QUESTION_ADMIN_EMAIL = 'questionExpert@app.com'
    QUESTION_ADMIN_USERNAME = 'questionExpert'
    TRANSLATION_ADMIN_EMAIL = 'translatorExpert@app.com'
    TRANSLATION_ADMIN_USERNAME = 'translationExpert'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'category': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {
            'GET': {}
        }

        @acl_decorators.can_manage_contributors_role
        def get(self, unused_category):
            return self.render_json({'success': 1})

    def setUp(self):
        super(CanManageContributorsRoleDecoratorTests, self).setUp()
        self.signup(self.user_email, self.username)
        self.signup(
            self.TRANSLATION_ADMIN_EMAIL, self.TRANSLATION_ADMIN_USERNAME)
        self.signup(self.QUESTION_ADMIN_EMAIL, self.QUESTION_ADMIN_USERNAME)
        self.add_user_role(
            self.TRANSLATION_ADMIN_USERNAME, feconf.ROLE_ID_TRANSLATION_ADMIN)
        self.add_user_role(
            self.QUESTION_ADMIN_USERNAME, feconf.ROLE_ID_QUESTION_ADMIN)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication([
            webapp2.Route(
                '/can_manage_contributors_role/<category>', self.MockHandler)
        ], debug=feconf.DEBUG))

    def test_normal_user_cannot_access_release_coordinator_page(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/translation',
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to modify contributor\'s role.')
        self.logout()

    def test_guest_user_cannot_manage_contributors_role(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/translation',
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')
        self.logout()

    def test_translation_admin_can_manage_translation_role(self):
        self.login(self.TRANSLATION_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/translation')
        self.assertEqual(response['success'], 1)
        self.logout()

    def test_translation_admin_cannot_manage_question_role(self):
        # Each contributor-admin role is scoped to its own category.
        self.login(self.TRANSLATION_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/question',
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to modify contributor\'s role.')
        self.logout()

    def test_question_admin_can_manage_question_role(self):
        self.login(self.QUESTION_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/question')
        self.assertEqual(response['success'], 1)
        self.logout()

    def test_question_admin_cannot_manage_translation_role(self):
        self.login(self.QUESTION_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/translation',
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to modify contributor\'s role.')
        self.logout()

    def test_invalid_category_raise_error(self):
        # Categories other than the supported ones are rejected with a 400.
        self.login(self.QUESTION_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/can_manage_contributors_role/invalid',
                expected_status_int=400)
        self.assertEqual(response['error'], 'Invalid category: invalid')
        self.logout()


class DeleteAnyUserTests(test_utils.GenericTestBase):
    """Tests for the can_delete_any_user decorator."""

    username = 'user'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_delete_any_user
        def get(self):
            return self.render_json({'success': 1})

    def setUp(self):
        super(DeleteAnyUserTests, self).setUp()
        self.signup(feconf.SYSTEM_EMAIL_ADDRESS, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock/', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_normal_user_cannot_delete_any_user(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock/', expected_status_int=401)
        self.logout()

    def test_not_logged_user_cannot_delete_any_user(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock/', expected_status_int=401)

    def test_primary_admin_can_delete_any_user(self):
        self.login(feconf.SYSTEM_EMAIL_ADDRESS)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/')
        self.assertEqual(response['success'], 1)
        self.logout()


class VoiceoverExplorationTests(test_utils.GenericTestBase):
    """Tests for can_voiceover_exploration decorator."""

    role = rights_domain.ROLE_VOICE_ARTIST
    username = 'user'
    user_email = 'user@example.com'
    banned_username = 'banneduser'
    banned_user_email = 'banneduser@example.com'
    published_exp_id_1 = 'exp_1'
    published_exp_id_2 = 'exp_2'
    private_exp_id_1 = 'exp_3'
    private_exp_id_2 = 'exp_4'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_voiceover_exploration
        def get(self, exploration_id):
            self.render_json({'exploration_id': exploration_id})

    def setUp(self):
        super(VoiceoverExplorationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.signup(self.banned_user_email, self.banned_username)
        self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
        self.signup(self.VOICEOVER_ADMIN_EMAIL, self.VOICEOVER_ADMIN_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.voice_artist_id = self.get_user_id_from_email(
            self.VOICE_ARTIST_EMAIL)
        self.voiceover_admin_id = self.get_user_id_from_email(
            self.VOICEOVER_ADMIN_EMAIL)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.mark_user_banned(self.banned_username)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.add_user_role(
            self.VOICEOVER_ADMIN_USERNAME, feconf.ROLE_ID_VOICEOVER_ADMIN)
        self.voiceover_admin = user_services.get_user_actions_info(
            self.voiceover_admin_id)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        # Two published and two private explorations, all owned by `owner`;
        # the voice artist is assigned to published_exp_id_1 only.
        self.save_new_valid_exploration(
            self.published_exp_id_1, self.owner_id)
        self.save_new_valid_exploration(
            self.published_exp_id_2, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id_1, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id_2, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id_1)
        rights_manager.publish_exploration(self.owner, self.published_exp_id_2)
        rights_manager.assign_role_for_exploration(
            self.voiceover_admin, self.published_exp_id_1,
            self.voice_artist_id, self.role)

    def test_banned_user_cannot_voiceover_exploration(self):
        self.login(self.banned_user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock/%s' % self.private_exp_id_1, expected_status_int=401)
        self.logout()

    def test_owner_can_voiceover_exploration(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.private_exp_id_1)
        self.assertEqual(response['exploration_id'], self.private_exp_id_1)
        self.logout()

    def test_moderator_can_voiceover_public_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.published_exp_id_1)
        self.assertEqual(response['exploration_id'], self.published_exp_id_1)
        self.logout()

    def test_moderator_can_voiceover_private_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.private_exp_id_1)
        self.assertEqual(response['exploration_id'], self.private_exp_id_1)
        self.logout()

    def test_admin_can_voiceover_private_exploration(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.private_exp_id_1)
        self.assertEqual(response['exploration_id'], self.private_exp_id_1)
        self.logout()

    def test_voice_artist_can_only_voiceover_assigned_public_exploration(self):
        self.login(self.VOICE_ARTIST_EMAIL)
        # Checking voice artist can voiceover assigned public exploration.
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.published_exp_id_1)
        self.assertEqual(response['exploration_id'], self.published_exp_id_1)
        # Checking voice artist cannot voiceover public exploration which he/she
        # is not assigned for.
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock/%s' % self.published_exp_id_2, expected_status_int=401)
        self.logout()

    def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_public_exploration(self):  # pylint: disable=line-too-long
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock/%s' % self.published_exp_id_1, expected_status_int=401)
        self.logout()

    def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_private_exploration(self):  # pylint: disable=line-too-long
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock/%s' % self.private_exp_id_1, expected_status_int=401)
        self.logout()


class VoiceArtistManagementTests(test_utils.GenericTestBase):
    """Tests for the can_manage_voice_artist decorator."""

    role = rights_domain.ROLE_VOICE_ARTIST
    username = 'user'
    user_email = 'user@example.com'
    banned_username = 'banneduser'
    banned_user_email = 'banneduser@example.com'
    published_exp_id_1 = 'exp_1'
    published_exp_id_2 = 'exp_2'
    private_exp_id_1 = 'exp_3'
    private_exp_id_2 = 'exp_4'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'entity_type': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'entity_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'POST': {}}

        @acl_decorators.can_manage_voice_artist
        def post(self, entity_type, entity_id):
            self.render_json({
                'entity_type': entity_type,
                'entity_id': entity_id})

    def setUp(self):
        super(VoiceArtistManagementTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.VOICEOVER_ADMIN_EMAIL, self.VOICEOVER_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.signup(self.banned_user_email, self.banned_username)
        self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.voiceover_admin_id = self.get_user_id_from_email(
            self.VOICEOVER_ADMIN_EMAIL)
        self.voice_artist_id = self.get_user_id_from_email(
            self.VOICE_ARTIST_EMAIL)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.mark_user_banned(self.banned_username)
        user_services.add_user_role(
            self.voiceover_admin_id, feconf.ROLE_ID_VOICEOVER_ADMIN)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.voiceover_admin = user_services.get_user_actions_info(
            self.voiceover_admin_id)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock/<entity_type>/<entity_id>', self.MockHandler)],
            debug=feconf.DEBUG,))
        # Two published and two private explorations owned by `owner`; the
        # voice artist is pre-assigned to published_exp_id_1.
        self.save_new_valid_exploration(
            self.published_exp_id_1, self.owner_id)
        self.save_new_valid_exploration(
            self.published_exp_id_2, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id_1, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id_2, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id_1)
        rights_manager.publish_exploration(self.owner, self.published_exp_id_2)
        rights_manager.assign_role_for_exploration(
            self.voiceover_admin, self.published_exp_id_1,
            self.voice_artist_id, self.role)

    def test_voiceover_admin_can_manage_voice_artist_in_public_exp(self):
        self.login(self.VOICEOVER_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            self.post_json(
                '/mock/exploration/%s' % self.published_exp_id_1,
                {}, csrf_token=csrf_token)
        self.logout()

    def test_assigning_voice_artist_for_unsupported_entity_type_raise_400(self):
        unsupported_entity_type = 'topic'
        self.login(self.VOICEOVER_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.post_json(
                '/mock/%s/%s' % (
                    unsupported_entity_type, self.published_exp_id_1),
                {}, csrf_token=csrf_token, expected_status_int=400)
        self.assertEqual(
            response['error'],
            'Unsupported entity_type: topic')
        self.logout()

    def test_voiceover_admin_cannot_assign_voice_artist_in_private_exp(self):
        self.login(self.VOICEOVER_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.post_json(
                '/mock/exploration/%s' % self.private_exp_id_1, {},
                csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to manage voice artists.')
        self.logout()

    def test_owner_cannot_assign_voice_artist_in_public_exp(self):
        # NOTE(review): despite the name, this posts against
        # private_exp_id_1 -- confirm whether the public exploration was
        # intended here.
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.post_json(
                '/mock/exploration/%s' % self.private_exp_id_1, {},
                csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to manage voice artists.')
        self.logout()

    def test_random_user_cannot_assign_voice_artist_in_public_exp(self):
        self.login(self.user_email)
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.post_json(
                '/mock/exploration/%s' % self.private_exp_id_1, {},
                csrf_token=csrf_token, expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to manage voice artists.')
        self.logout()

    def test_voiceover_admin_cannot_assign_voice_artist_in_invalid_exp(self):
        self.login(self.VOICEOVER_ADMIN_EMAIL)
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            self.post_json(
                '/mock/exploration/invalid_exp_id', {},
                csrf_token=csrf_token, expected_status_int=404)
        self.logout()

    def test_voiceover_admin_cannot_assign_voice_artist_without_login(self):
        csrf_token = self.get_new_csrf_token()
        with self.swap(self, 'testapp', self.mock_testapp):
            self.post_json(
                '/mock/exploration/%s' % self.private_exp_id_1, {},
                csrf_token=csrf_token, expected_status_int=401)


class EditExplorationTests(test_utils.GenericTestBase):
    """Tests for can_edit_exploration decorator."""

    # The plain account here doubles as the banned user (it is passed to
    # mark_user_banned in setUp).
    username = 'banneduser'
    user_email = 'user@example.com'
    published_exp_id = 'exp_0'
    private_exp_id = 'exp_1'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_edit_exploration
        def get(self, exploration_id):
            self.render_json({'exploration_id': exploration_id})

    def setUp(self):
        super(EditExplorationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.user_email, self.username)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.mark_user_banned(self.username)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_edit_exploration/<exploration_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.save_new_valid_exploration(
            self.published_exp_id, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id)

    def test_can_not_edit_exploration_with_invalid_exp_id(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_exploration/invalid_exp_id',
                expected_status_int=404)
        self.logout()

    def test_banned_user_cannot_edit_exploration(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_exploration/%s' % self.private_exp_id,
                expected_status_int=401)
        self.logout()

    def test_owner_can_edit_exploration(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()

    def test_moderator_can_edit_public_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_exploration/%s' % self.published_exp_id)
        self.assertEqual(response['exploration_id'], self.published_exp_id)
        self.logout()

    def test_moderator_can_edit_private_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()

    def test_admin_can_edit_private_exploration(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_edit_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()


class ManageOwnAccountTests(test_utils.GenericTestBase):
    """Tests for decorator can_manage_own_account."""

    banned_user = 'banneduser'
    banned_user_email = 'banned@example.com'
    username = 'user'
    user_email = 'user@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_manage_own_account
        def get(self):
            return self.render_json({'success': 1})

    def setUp(self):
        super(ManageOwnAccountTests, self).setUp()
        self.signup(self.banned_user_email, self.banned_user)
self.signup(self.user_email, self.username) self.mark_user_banned(self.banned_user) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/', self.MockHandler)], debug=feconf.DEBUG, )) def test_banned_user_cannot_update_preferences(self): self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock/', expected_status_int=401) self.logout() def test_normal_user_can_manage_preferences(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/') self.assertEqual(response['success'], 1) self.logout() class UploadExplorationTests(test_utils.GenericTestBase): """Tests for can_upload_exploration decorator.""" class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_upload_exploration def get(self): return self.render_json({}) def setUp(self): super(UploadExplorationTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock_upload_exploration/', self.MockHandler)], debug=feconf.DEBUG, )) def test_super_admin_can_upload_explorations(self): self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_upload_exploration/') self.logout() def test_normal_user_cannot_upload_explorations(self): self.login(self.EDITOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_upload_exploration/', expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to upload explorations.') self.logout() def test_guest_cannot_upload_explorations(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( 
'/mock_upload_exploration/', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You do not have credentials to upload explorations.')
        self.logout()

    def test_guest_cannot_upload_explorations(self):
        # No login call here: the request is made as a guest.
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_upload_exploration/', expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')


class DeleteExplorationTests(test_utils.GenericTestBase):
    """Tests for can_delete_exploration decorator."""

    private_exp_id = 'exp_0'
    published_exp_id = 'exp_1'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_delete_exploration
        def get(self, exploration_id):
            self.render_json({'exploration_id': exploration_id})

    def setUp(self):
        super(DeleteExplorationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_delete_exploration/<exploration_id>',
                self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        # One exploration stays private, one is published, so both rights
        # states are covered by the tests below.
        self.save_new_valid_exploration(
            self.published_exp_id, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id)

    def test_guest_can_not_delete_exploration(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_exploration/%s' % self.private_exp_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')

    def test_owner_can_delete_owned_private_exploration(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'],
self.private_exp_id)
        self.logout()

    def test_moderator_can_delete_published_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_exploration/%s' % self.published_exp_id)
        self.assertEqual(response['exploration_id'], self.published_exp_id)
        self.logout()

    def test_owner_cannot_delete_published_exploration(self):
        # Once published, deletion is reserved for moderators/admins.
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_exploration/%s' % self.published_exp_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'User %s does not have permissions to delete exploration %s'
            % (self.owner_id, self.published_exp_id))
        self.logout()

    def test_moderator_can_delete_private_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_delete_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()


class SuggestChangesToExplorationTests(test_utils.GenericTestBase):
    """Tests for can_suggest_changes_to_exploration decorator."""

    username = 'user'
    user_email = 'user@example.com'
    banned_username = 'banneduser'
    banned_user_email = 'banned@example.com'
    exploration_id = 'exp_id'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_suggest_changes_to_exploration
        def get(self, exploration_id):
            self.render_json({'exploration_id': exploration_id})

    def setUp(self):
        super(SuggestChangesToExplorationTests, self).setUp()
        self.signup(self.user_email, self.username)
        self.signup(self.banned_user_email, self.banned_username)
        self.mark_user_banned(self.banned_username)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock/<exploration_id>',
self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_banned_user_cannot_suggest_changes(self):
        self.login(self.banned_user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock/%s' % self.exploration_id, expected_status_int=401)
        self.logout()

    def test_normal_user_can_suggest_changes(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.exploration_id)
        self.assertEqual(response['exploration_id'], self.exploration_id)
        self.logout()


class SuggestChangesDecoratorsTests(test_utils.GenericTestBase):
    """Tests for can_suggest_changes decorator."""

    username = 'user'
    user_email = 'user@example.com'
    banned_username = 'banneduser'
    banned_user_email = 'banned@example.com'
    exploration_id = 'exp_id'

    class MockHandler(base.BaseHandler):
        # Unlike the exploration-specific variant above, this decorator takes
        # no URL path argument.
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_suggest_changes
        def get(self):
            self.render_json({})

    def setUp(self):
        super(SuggestChangesDecoratorsTests, self).setUp()
        self.signup(self.user_email, self.username)
        self.signup(self.banned_user_email, self.banned_username)
        self.mark_user_banned(self.banned_username)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_banned_user_cannot_suggest_changes(self):
        self.login(self.banned_user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock', expected_status_int=401)
        self.logout()

    def test_normal_user_can_suggest_changes(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock')
        self.logout()


class ResubmitSuggestionDecoratorsTests(test_utils.GenericTestBase):
    """Tests for can_resubmit_suggestion decorator."""

    owner_username = 'owner'
    owner_email = 'owner@example.com'
    author_username = 'author'
    author_email = 'author@example.com'
    username = 'user'
user_email = 'user@example.com'
    TARGET_TYPE = 'exploration'
    SUGGESTION_TYPE = 'edit_exploration_state_content'
    exploration_id = 'exp_id'
    target_version_id = 1
    # Minimal valid change payload for an edit-state-content suggestion.
    change_dict = {
        'cmd': 'edit_state_property',
        'property_name': 'content',
        'state_name': 'Introduction',
        'new_value': ''
    }

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'suggestion_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_resubmit_suggestion
        def get(self, suggestion_id):
            self.render_json({'suggestion_id': suggestion_id})

    def setUp(self):
        super(ResubmitSuggestionDecoratorsTests, self).setUp()
        self.signup(self.author_email, self.author_username)
        self.signup(self.user_email, self.username)
        self.signup(self.owner_email, self.owner_username)
        self.author_id = self.get_user_id_from_email(self.author_email)
        self.owner_id = self.get_user_id_from_email(self.owner_email)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock/<suggestion_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.save_new_default_exploration(self.exploration_id, self.owner_id)
        suggestion_services.create_suggestion(
            self.SUGGESTION_TYPE, self.TARGET_TYPE,
            self.exploration_id, self.target_version_id,
            self.author_id, self.change_dict, '')
        # Look the suggestion back up to learn its generated id.
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id),
             ('target_id', self.exploration_id)])[0]
        self.suggestion_id = suggestion.suggestion_id

    def test_author_can_resubmit_suggestion(self):
        self.login(self.author_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.suggestion_id)
        self.assertEqual(response['suggestion_id'], self.suggestion_id)
        self.logout()

    def test_non_author_cannot_resubmit_suggestion(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock/%s' % self.suggestion_id, expected_status_int=401)
        self.logout()


class
DecoratorForAcceptingSuggestionTests(test_utils.GenericTestBase):
    """Tests for get_decorator_for_accepting_suggestion decorator."""

    AUTHOR_USERNAME = 'author'
    AUTHOR_EMAIL = 'author@example.com'
    VIEWER_USERNAME = 'user'
    VIEWER_EMAIL = 'user@example.com'
    TARGET_TYPE = 'exploration'
    SUGGESTION_TYPE = 'edit_exploration_state_content'
    EXPLORATION_ID = 'exp_id'
    TARGET_VERSION_ID = 1
    CHANGE_DICT = {
        'cmd': 'edit_state_property',
        'property_name': 'content',
        'state_name': 'Introduction',
        'new_value': ''
    }

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'suggestion_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'target_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        # The decorator factory is parameterized with can_edit_exploration,
        # so acceptance rights mirror edit rights on the target exploration.
        @acl_decorators.get_decorator_for_accepting_suggestion(
            acl_decorators.can_edit_exploration)
        def get(self, target_id, suggestion_id):
            self.render_json({
                'target_id': target_id,
                'suggestion_id': suggestion_id
            })

    def setUp(self):
        super(DecoratorForAcceptingSuggestionTests, self).setUp()
        self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_accept_suggestion/<target_id>/<suggestion_id>',
                self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.save_new_default_exploration(self.EXPLORATION_ID, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.EXPLORATION_ID)
        suggestion_services.create_suggestion(
            self.SUGGESTION_TYPE, self.TARGET_TYPE,
            self.EXPLORATION_ID, self.TARGET_VERSION_ID,
            self.author_id, self.CHANGE_DICT, '')
        suggestion = suggestion_services.query_suggestions(
            [('author_id', self.author_id), ('target_id',
self.EXPLORATION_ID)])[0]
        self.suggestion_id = suggestion.suggestion_id

    def test_guest_cannot_accept_suggestion(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_accept_suggestion/%s/%s'
                % (self.EXPLORATION_ID, self.suggestion_id),
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')

    def test_owner_can_accept_suggestion(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_accept_suggestion/%s/%s'
                % (self.EXPLORATION_ID, self.suggestion_id))
        self.assertEqual(response['suggestion_id'], self.suggestion_id)
        self.assertEqual(response['target_id'], self.EXPLORATION_ID)
        self.logout()

    def test_viewer_cannot_accept_suggestion(self):
        self.login(self.VIEWER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_accept_suggestion/%s/%s'
                % (self.EXPLORATION_ID, self.suggestion_id),
                expected_status_int=401)
        self.logout()


class PublishExplorationTests(test_utils.GenericTestBase):
    """Tests for can_publish_exploration decorator."""

    private_exp_id = 'exp_0'
    public_exp_id = 'exp_1'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_publish_exploration
        def get(self, exploration_id):
            self.render_json({'exploration_id': exploration_id})

    def setUp(self):
        super(PublishExplorationTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.get_user_actions_info(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_publish_exploration/<exploration_id>',
                self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.save_new_valid_exploration(
            self.public_exp_id, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)
        rights_manager.publish_exploration(self.owner, self.public_exp_id)

    def test_cannot_publish_exploration_with_invalid_exp_id(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_publish_exploration/invalid_exp_id',
                expected_status_int=404)
        self.logout()

    def test_owner_can_publish_owned_exploration(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_publish_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()

    def test_already_published_exploration_cannot_be_published(self):
        # Publishing is a one-way transition; a second publish is rejected.
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_publish_exploration/%s' % self.public_exp_id,
                expected_status_int=401)
        self.logout()

    def test_moderator_cannot_publish_private_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_publish_exploration/%s' % self.private_exp_id,
                expected_status_int=401)
        self.logout()

    def test_admin_can_publish_any_exploration(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_publish_exploration/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)


class ModifyExplorationRolesTests(test_utils.GenericTestBase):
    """Tests for can_modify_exploration_roles decorator."""

    private_exp_id = 'exp_0'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'exploration_id': {
'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_modify_exploration_roles
        def get(self, exploration_id):
            self.render_json({'exploration_id': exploration_id})

    def setUp(self):
        super(ModifyExplorationRolesTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)

    def test_owner_can_modify_exploration_roles(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()

    def test_moderator_can_modify_roles_of_unowned_exploration(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock/%s' % self.private_exp_id)
        self.logout()

    def test_admin_can_modify_roles_of_any_exploration(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock/%s' % self.private_exp_id)
        self.assertEqual(response['exploration_id'], self.private_exp_id)
        self.logout()


class CollectionPublishStatusTests(test_utils.GenericTestBase):
    """Tests can_publish_collection and can_unpublish_collection decorators."""

    user_email = 'user@example.com'
    username = 'user'
    published_exp_id = 'exp_id_1'
    private_exp_id = 'exp_id_2'
    published_col_id = 'col_id_1'
    private_col_id = 'col_id_2'

    class MockPublishHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE =
feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'collection_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_publish_collection
        def get(self, collection_id):
            return self.render_json({'collection_id': collection_id})

    class MockUnpublishHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'collection_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_unpublish_collection
        def get(self, collection_id):
            return self.render_json({'collection_id': collection_id})

    def setUp(self):
        super(CollectionPublishStatusTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        self.signup(self.user_email, self.username)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.set_moderators([self.MODERATOR_USERNAME])
        self.set_collection_editors([self.OWNER_USERNAME])
        self.owner = user_services.get_user_actions_info(self.owner_id)
        # One app serves both routes so each decorator is tested against the
        # same fixture data.
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [
                webapp2.Route(
                    '/mock_publish_collection/<collection_id>',
                    self.MockPublishHandler),
                webapp2.Route(
                    '/mock_unpublish_collection/<collection_id>',
                    self.MockUnpublishHandler)
            ],
            debug=feconf.DEBUG,
        ))
        self.save_new_valid_exploration(
            self.published_exp_id, self.owner_id)
        self.save_new_valid_exploration(
            self.private_exp_id, self.owner_id)
        self.save_new_valid_collection(
            self.published_col_id, self.owner_id,
            exploration_id=self.published_col_id)
        self.save_new_valid_collection(
            self.private_col_id, self.owner_id,
            exploration_id=self.private_col_id)
        rights_manager.publish_exploration(self.owner, self.published_exp_id)
        rights_manager.publish_collection(self.owner, self.published_col_id)

    def
test_cannot_publish_collection_with_invalid_exp_id(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_publish_collection/invalid_col_id',
                expected_status_int=404)
        self.logout()

    def test_cannot_unpublish_collection_with_invalid_exp_id(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_unpublish_collection/invalid_col_id',
                expected_status_int=404)
        self.logout()

    def test_owner_can_publish_collection(self):
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_publish_collection/%s' % self.private_col_id)
        self.assertEqual(response['collection_id'], self.private_col_id)
        self.logout()

    def test_owner_cannot_unpublish_public_collection(self):
        # Unpublishing is reserved for moderators (verified below).
        self.login(self.OWNER_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_unpublish_collection/%s' % self.published_col_id,
                expected_status_int=401)
        self.logout()

    def test_moderator_can_unpublish_public_collection(self):
        self.login(self.MODERATOR_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_unpublish_collection/%s' % self.published_col_id)
        self.assertEqual(response['collection_id'], self.published_col_id)
        self.logout()

    def test_admin_can_publish_any_collection(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_publish_collection/%s' % self.private_col_id)
        self.assertEqual(response['collection_id'], self.private_col_id)
        self.logout()

    def test_admin_cannot_publish_already_published_collection(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_publish_collection/%s' % self.published_col_id,
                expected_status_int=401)
        self.logout()


class AccessLearnerDashboardDecoratorTests(test_utils.GenericTestBase):
    """Tests the decorator
can_access_learner_dashboard."""

    user = 'user'
    user_email = 'user@example.com'
    banned_user = 'banneduser'
    banned_user_email = 'banned@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_access_learner_dashboard
        def get(self):
            return self.render_json({})

    def setUp(self):
        super(AccessLearnerDashboardDecoratorTests, self).setUp()
        self.signup(self.user_email, self.user)
        self.signup(self.banned_user_email, self.banned_user)
        self.mark_user_banned(self.banned_user)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock/', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_banned_user_is_redirected(self):
        # The JSON variant of the handler rejects banned users with 401.
        self.login(self.banned_user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock/', expected_status_int=401)
        self.logout()

    def test_exploration_editor_can_access_learner_dashboard(self):
        self.login(self.user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock/')
        self.logout()


class EditTopicDecoratorTests(test_utils.GenericTestBase):
    """Tests the decorator can_edit_topic."""

    manager_username = 'topicmanager'
    manager_email = 'topicmanager@example.com'
    viewer_username = 'viewer'
    viewer_email = 'viewer@example.com'
    topic_id = 'topic_1'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_edit_topic
        def get(self, topic_id):
            self.render_json({'topic_id': topic_id})

    def setUp(self):
        super(EditTopicDecoratorTests, self).setUp()
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.manager_email, self.manager_username)
        self.signup(self.viewer_email, self.viewer_username)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id =
self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.manager_id = self.get_user_id_from_email(self.manager_email)
        self.viewer_id = self.get_user_id_from_email(self.viewer_email)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock_edit_topic/<topic_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.topic_id = topic_fetchers.get_new_topic_id()
        self.save_new_topic(
            self.topic_id, self.viewer_id, name='Name',
            description='Description', canonical_story_ids=[],
            additional_story_ids=[], uncategorized_skill_ids=[],
            subtopics=[], next_subtopic_id=1)
        topic_services.create_new_topic_rights(self.topic_id, self.admin_id)
        # Grant topic-manager rights on this specific topic only.
        self.set_topic_managers([self.manager_username], self.topic_id)
        self.manager = user_services.get_user_actions_info(self.manager_id)

    def test_can_not_edit_topic_with_invalid_topic_id(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_topic/invalid_topic_id', expected_status_int=404)
        self.logout()

    def test_admin_can_edit_topic(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock_edit_topic/%s' % self.topic_id)
        self.assertEqual(response['topic_id'], self.topic_id)
        self.logout()

    def test_topic_manager_can_edit_topic(self):
        self.login(self.manager_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock_edit_topic/%s' % self.topic_id)
        self.assertEqual(response['topic_id'], self.topic_id)
        self.logout()

    def test_normal_user_cannot_edit_topic(self):
        self.login(self.viewer_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_topic/%s' % self.topic_id,
                expected_status_int=401)
        self.logout()


class EditStoryDecoratorTests(test_utils.GenericTestBase):
    """Tests the decorator can_edit_story."""

    manager_username = 'topicmanager'
    manager_email =
'topicmanager@example.com'
    viewer_username = 'viewer'
    viewer_email = 'viewer@example.com'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'story_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_edit_story
        def get(self, story_id):
            self.render_json({'story_id': story_id})

    def setUp(self):
        super(EditStoryDecoratorTests, self).setUp()
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock_edit_story/<story_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.story_id = story_services.get_new_story_id()
        self.topic_id = topic_fetchers.get_new_topic_id()
        self.save_new_story(self.story_id, self.admin_id, self.topic_id)
        self.save_new_topic(
            self.topic_id, self.admin_id, name='Name',
            description='Description', canonical_story_ids=[self.story_id],
            additional_story_ids=[], uncategorized_skill_ids=[],
            subtopics=[], next_subtopic_id=1)
        topic_services.create_new_topic_rights(self.topic_id, self.admin_id)

    def test_can_not_edit_story_with_invalid_story_id(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_story/story_id_new', expected_status_int=404)
        self.logout()

    def test_can_not_edit_story_with_invalid_topic_id(self):
        # A story whose topic was never saved cannot be resolved -> 404.
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        story_id = story_services.get_new_story_id()
        topic_id = topic_fetchers.get_new_topic_id()
        self.save_new_story(story_id, self.admin_id, topic_id)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_story/%s' % story_id, expected_status_int=404)
        self.logout()

    def test_admin_can_edit_story(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock_edit_story/%s' % self.story_id)
        self.assertEqual(response['story_id'], self.story_id)
        self.logout()

    def test_topic_manager_can_edit_story(self):
        # Manager rights are assigned lazily inside the test (not in setUp).
        self.signup(self.manager_email, self.manager_username)
        self.set_topic_managers([self.manager_username], self.topic_id)
        self.login(self.manager_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json('/mock_edit_story/%s' % self.story_id)
        self.assertEqual(response['story_id'], self.story_id)
        self.logout()

    def test_normal_user_cannot_edit_story(self):
        self.signup(self.viewer_email, self.viewer_username)
        self.login(self.viewer_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_edit_story/%s' % self.story_id,
                expected_status_int=401)
        self.logout()


class AddStoryToTopicTests(test_utils.GenericTestBase):
    """Tests for decorator can_add_new_story_to_topic."""

    manager_username = 'topicmanager'
    manager_email = 'topicmanager@example.com'
    viewer_username = 'viewer'
    viewer_email = 'viewer@example.com'
    topic_id = 'topic_1'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_add_new_story_to_topic
        def get(self, topic_id):
            self.render_json({'topic_id': topic_id})

    def setUp(self):
        super(AddStoryToTopicTests, self).setUp()
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.manager_email, self.manager_username)
        self.signup(self.viewer_email, self.viewer_username)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.manager_id = self.get_user_id_from_email(self.manager_email)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.viewer_id =
self.get_user_id_from_email(self.viewer_email)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route(
                '/mock_add_story_to_topic/<topic_id>', self.MockHandler)],
            debug=feconf.DEBUG,
        ))
        self.topic_id = topic_fetchers.get_new_topic_id()
        self.save_new_topic(
            self.topic_id, self.viewer_id, name='Name',
            description='Description', canonical_story_ids=[],
            additional_story_ids=[], uncategorized_skill_ids=[],
            subtopics=[], next_subtopic_id=1)
        topic_services.create_new_topic_rights(self.topic_id, self.admin_id)
        self.set_topic_managers([self.manager_username], self.topic_id)
        self.manager = user_services.get_user_actions_info(self.manager_id)

    def test_can_not_add_story_to_topic_with_invalid_topic_id(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_add_story_to_topic/invalid_topic_id',
                expected_status_int=404)
        self.logout()

    def test_admin_can_add_story_to_topic(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_add_story_to_topic/%s' % self.topic_id)
        self.assertEqual(response['topic_id'], self.topic_id)
        self.logout()

    def test_topic_manager_cannot_add_story_to_topic_with_invalid_topic_id(
            self):
        self.login(self.manager_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_add_story_to_topic/incorrect_id',
                expected_status_int=404)
        self.logout()

    def test_topic_manager_can_add_story_to_topic(self):
        self.login(self.manager_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_add_story_to_topic/%s' % self.topic_id)
        self.assertEqual(response['topic_id'], self.topic_id)
        self.logout()

    def test_normal_user_cannot_add_story_to_topic(self):
        self.login(self.viewer_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_add_story_to_topic/%s' % self.topic_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
'You do not have credentials to add a story to this topic.')
        self.logout()

    def test_guest_cannot_add_story_to_topic(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_add_story_to_topic/%s' % self.topic_id,
                expected_status_int=401)
        self.assertEqual(
            response['error'],
            'You must be logged in to access this resource.')


class StoryViewerTests(test_utils.GenericTestBase):
    """Tests for decorator can_access_story_viewer_page."""

    banned_user = 'banneduser'
    banned_user_email = 'banned@example.com'

    class MockDataHandler(base.BaseHandler):
        # JSON endpoint variant: errors surface as JSON status codes.
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {
            'topic_url_fragment': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'story_url_fragment': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'classroom_url_fragment': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_access_story_viewer_page
        def get(self, story_url_fragment):
            self.render_json({'story_url_fragment': story_url_fragment})

    class MockPageHandler(base.BaseHandler):
        # HTML page variant: bad fragments produce redirects instead of JSON
        # errors (exercised by the redirect tests below).
        URL_PATH_ARGS_SCHEMAS = {
            'topic_url_fragment': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'story_url_fragment': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'classroom_url_fragment': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_access_story_viewer_page
        def get(self, _):
            self.render_template('oppia-root.mainpage.html')

    def setUp(self):
        super(StoryViewerTests, self).setUp()
        self.signup(
            self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.signup(self.banned_user_email, self.banned_user)
        self.mark_user_banned(self.banned_user)
        story_data_url = (
            '/mock_story_data/<classroom_url_fragment>/'
            '<topic_url_fragment>/<story_url_fragment>')
        story_page_url = (
'/mock_story_page/<classroom_url_fragment>/'
            '<topic_url_fragment>/story/<story_url_fragment>')
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [
                webapp2.Route(story_data_url, self.MockDataHandler),
                webapp2.Route(story_page_url, self.MockPageHandler)
            ],
            debug=feconf.DEBUG,
        ))
        self.topic_id = topic_fetchers.get_new_topic_id()
        self.story_id = story_services.get_new_story_id()
        self.story_url_fragment = 'story-frag'
        self.save_new_story(
            self.story_id, self.admin_id, self.topic_id,
            url_fragment=self.story_url_fragment)
        subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
            1, 'Subtopic Title 1')
        subtopic_1.skill_ids = ['skill_id_1']
        subtopic_1.url_fragment = 'sub-one-frag'
        self.save_new_topic(
            self.topic_id, self.admin_id, name='Name',
            description='Description', canonical_story_ids=[self.story_id],
            additional_story_ids=[], uncategorized_skill_ids=[],
            subtopics=[subtopic_1], next_subtopic_id=2)

    def test_cannot_access_non_existent_story(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_story_data/staging/topic/non-existent-frag',
                expected_status_int=404)

    def test_cannot_access_story_when_topic_is_not_published(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_story_data/staging/topic/%s' % self.story_url_fragment,
                expected_status_int=404)

    def test_cannot_access_story_when_story_is_not_published(self):
        # Publishing the topic alone is not enough; the story must also be
        # published for viewer access.
        topic_services.publish_topic(self.topic_id, self.admin_id)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_story_data/staging/topic/%s' % self.story_url_fragment,
                expected_status_int=404)

    def test_can_access_story_when_story_and_topic_are_published(self):
        topic_services.publish_topic(self.topic_id, self.admin_id)
        topic_services.publish_story(
            self.topic_id, self.story_id, self.admin_id)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_story_data/staging/topic/%s' % self.story_url_fragment,
                expected_status_int=200)

    def
test_can_access_story_when_all_url_fragments_are_valid(self): topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( '/mock_story_page/staging/topic/story/%s' % self.story_url_fragment, expected_status_int=200) def test_redirect_to_story_page_if_story_url_fragment_is_invalid(self): topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_story_page/staging/topic/story/000', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic/story', response.headers['location']) def test_redirect_to_correct_url_if_abbreviated_topic_is_invalid(self): topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_story_page/staging/invalid-topic/story/%s' % self.story_url_fragment, expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic/story/%s' % self.story_url_fragment, response.headers['location']) def test_redirect_with_correct_classroom_name_in_url(self): topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, self.story_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_story_page/math/topic/story/%s' % self.story_url_fragment, expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic/story/%s' % self.story_url_fragment, response.headers['location']) def test_redirect_lowercase_story_url_fragment(self): topic_services.publish_topic(self.topic_id, self.admin_id) topic_services.publish_story( self.topic_id, 
self.story_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_story_page/staging/topic/story/Story-frag', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic/story/story-frag', response.headers['location']) class SubtopicViewerTests(test_utils.GenericTestBase): """Tests for decorator can_access_subtopic_viewer_page.""" banned_user = 'banneduser' banned_user_email = 'banned@example.com' class MockDataHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { 'type': 'basestring' } }, 'subtopic_url_fragment': { 'schema': { 'type': 'basestring' } }, 'classroom_url_fragment': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_subtopic_viewer_page def get(self, unused_topic_url_fragment, subtopic_url_fragment): self.render_json({'subtopic_url_fragment': subtopic_url_fragment}) class MockPageHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { 'type': 'basestring' } }, 'subtopic_url_fragment': { 'schema': { 'type': 'basestring' } }, 'classroom_url_fragment': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_subtopic_viewer_page def get(self, unused_topic_url_fragment, unused_subtopic_url_fragment): self.render_template('subtopic-viewer-page.mainpage.html') def setUp(self): super(SubtopicViewerTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) self.mark_user_banned(self.banned_user) subtopic_data_url = ( '/mock_subtopic_data/<classroom_url_fragment>/' 
'<topic_url_fragment>/<subtopic_url_fragment>') subtopic_page_url = ( '/mock_subtopic_page/<classroom_url_fragment>/' '<topic_url_fragment>/revision/<subtopic_url_fragment>') self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [ webapp2.Route(subtopic_data_url, self.MockDataHandler), webapp2.Route(subtopic_page_url, self.MockPageHandler) ], debug=feconf.DEBUG, )) self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( 1, 'Subtopic Title 1') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' subtopic_2 = topic_domain.Subtopic.create_default_subtopic( 2, 'Subtopic Title 2') subtopic_2.skill_ids = ['skill_id_2'] subtopic_2.url_fragment = 'sub-two-frag' self.subtopic_page_1 = ( subtopic_page_domain.SubtopicPage.create_default_subtopic_page( 1, self.topic_id)) subtopic_page_services.save_subtopic_page( self.admin_id, self.subtopic_page_1, 'Added subtopic', [topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'subtopic_id': 1, 'title': 'Sample' })] ) self.save_new_topic( self.topic_id, self.admin_id, name='topic name', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic_1, subtopic_2], next_subtopic_id=3, url_fragment='topic-frag') def test_cannot_access_non_existent_subtopic(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_subtopic_data/staging/topic-frag/non-existent-frag', expected_status_int=404) def test_cannot_access_subtopic_when_topic_is_not_published(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_subtopic_data/staging/topic-frag/sub-one-frag', expected_status_int=404) def test_can_access_subtopic_when_topic_is_published(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_subtopic_data/staging/topic-frag/sub-one-frag', 
expected_status_int=200) def test_can_access_subtopic_when_all_url_fragments_are_valid(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( '/mock_subtopic_page/staging/topic-frag/revision/sub-one-frag', expected_status_int=200) def test_fall_back_to_revision_page_if_subtopic_url_frag_is_invalid(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_subtopic_page/staging/topic-frag/revision/000', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic-frag/revision', response.headers['location']) def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_subtopic_page/math/invalid-topic/revision/sub-one-frag', expected_status_int=302) self.assertEqual( 'http://localhost/learn/math', response.headers['location']) def test_redirect_with_correct_classroom_name_in_url(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_subtopic_page/math/topic-frag/revision/sub-one-frag', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic-frag/revision' '/sub-one-frag', response.headers['location']) def test_redirect_with_lowercase_subtopic_url_fragment(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_subtopic_page/staging/topic-frag/revision/Sub-One-Frag', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic-frag/revision' '/sub-one-frag', response.headers['location']) class TopicViewerTests(test_utils.GenericTestBase): """Tests for 
decorator can_access_topic_viewer_page.""" banned_user = 'banneduser' banned_user_email = 'banned@example.com' class MockDataHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { 'type': 'basestring' } }, 'classroom_url_fragment': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_topic_viewer_page def get(self, topic_name): self.render_json({'topic_name': topic_name}) class MockPageHandler(base.BaseHandler): URL_PATH_ARGS_SCHEMAS = { 'topic_url_fragment': { 'schema': { 'type': 'basestring' } }, 'classroom_url_fragment': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_access_topic_viewer_page def get(self, unused_topic_name): self.render_template('topic-viewer-page.mainpage.html') def setUp(self): super(TopicViewerTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) self.mark_user_banned(self.banned_user) topic_data_url = ( '/mock_topic_data/<classroom_url_fragment>/<topic_url_fragment>') topic_page_url = ( '/mock_topic_page/<classroom_url_fragment>/<topic_url_fragment>') self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [ webapp2.Route(topic_data_url, self.MockDataHandler), webapp2.Route(topic_page_url, self.MockPageHandler) ], debug=feconf.DEBUG, )) self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( 1, 'Subtopic Title 1') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( self.topic_id, self.admin_id, name='Name', description='Description', canonical_story_ids=[], 
additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic_1], next_subtopic_id=2) def test_cannot_access_non_existent_topic(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_topic_data/staging/invalid-topic', expected_status_int=404) def test_cannot_access_unpublished_topic(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_topic_data/staging/topic', expected_status_int=404) def test_can_access_published_topic(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_topic_data/staging/topic', expected_status_int=200) def test_can_access_topic_when_all_url_fragments_are_valid(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): self.get_html_response( '/mock_topic_page/staging/topic', expected_status_int=200) def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_topic_page/math/invalid-topic', expected_status_int=302) self.assertEqual( 'http://localhost/learn/math', response.headers['location']) def test_redirect_with_correct_classroom_name_in_url(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_topic_page/math/topic', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic', response.headers['location']) def test_redirect_with_lowercase_topic_url_fragment(self): topic_services.publish_topic(self.topic_id, self.admin_id) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_html_response( '/mock_topic_page/staging/TOPIC', expected_status_int=302) self.assertEqual( 'http://localhost/learn/staging/topic', 
                response.headers['location'])


class CreateSkillTests(test_utils.GenericTestBase):
    """Tests for decorator can_create_skill."""

    banned_user = 'banneduser'
    banned_user_email = 'banned@example.com'

    class MockHandler(base.BaseHandler):
        # Minimal JSON handler guarded by the decorator under test; a
        # successful GET simply returns an empty JSON payload.
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_create_skill
        def get(self):
            self.render_json({})

    def setUp(self):
        super(CreateSkillTests, self).setUp()
        # A curriculum admin (allowed) and a banned user (denied) exercise
        # both sides of the decorator.
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.signup(self.banned_user_email, self.banned_user)
        self.mark_user_banned(self.banned_user)
        # Route the mock handler on its own test app; tests swap it in
        # via self.swap(self, 'testapp', ...).
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock_create_skill', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_admin_can_create_skill(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock_create_skill')
        self.logout()

    def test_banned_user_cannot_create_skill(self):
        self.login(self.banned_user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_create_skill', expected_status_int=401)
            self.assertEqual(
                response['error'],
                'You do not have credentials to create a skill.')
        self.logout()

    def test_guest_cannot_add_create_skill(self):
        # Logged-out users are rejected before any role check happens.
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_create_skill', expected_status_int=401)
            self.assertEqual(
                response['error'],
                'You must be logged in to access this resource.')


class ManageQuestionSkillStatusTests(test_utils.GenericTestBase):
    """Tests for decorator can_manage_question_skill_status."""

    viewer_username = 'viewer'
    viewer_email = 'viewer@example.com'
    skill_id = '1'

    class MockHandler(base.BaseHandler):
        GET_HANDLER_ERROR_RETURN_TYPE
= feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'skill_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_manage_question_skill_status def get(self, skill_id): self.render_json({'skill_id': skill_id}) def setUp(self): super(ManageQuestionSkillStatusTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.viewer_email, self.viewer_username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_manage_question_skill_status/<skill_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.question_id = question_services.get_new_question_id() self.question = self.save_new_question( self.question_id, self.admin_id, self._create_valid_question_data('ABC'), [self.skill_id]) question_services.create_new_question_skill_link( self.admin_id, self.question_id, self.skill_id, 0.5) def test_admin_can_manage_question_skill_status(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_question_skill_status/%s' % self.skill_id) self.assertEqual(response['skill_id'], self.skill_id) self.logout() def test_viewer_cannot_manage_question_skill_status(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_question_skill_status/%s' % self.skill_id, expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to publish a question.') self.logout() def test_guest_cannot_manage_question_skill_status(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_question_skill_status/%s' % self.skill_id, expected_status_int=401) self.assertEqual( 
            response['error'],
            'You must be logged in to access this resource.')


class CreateTopicTests(test_utils.GenericTestBase):
    """Tests for decorator can_create_topic."""

    banned_user = 'banneduser'
    banned_user_email = 'banned@example.com'

    class MockHandler(base.BaseHandler):
        # Minimal JSON handler guarded by the decorator under test.
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_create_topic
        def get(self):
            self.render_json({})

    def setUp(self):
        super(CreateTopicTests, self).setUp()
        # One allowed user (curriculum admin) and one denied user (banned).
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.signup(self.banned_user_email, self.banned_user)
        self.mark_user_banned(self.banned_user)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock_create_topic', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_admin_can_create_topic(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock_create_topic')
        self.logout()

    def test_banned_user_cannot_create_topic(self):
        self.login(self.banned_user_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_create_topic', expected_status_int=401)
            # The full message includes the username, so only check the
            # stable suffix here.
            self.assertIn(
                'does not have enough rights to create a topic.',
                response['error'])
        self.logout()

    def test_guest_cannot_create_topic(self):
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_create_topic', expected_status_int=401)
            self.assertEqual(
                response['error'],
                'You must be logged in to access this resource.')


class ManageRightsForTopicTests(test_utils.GenericTestBase):
    """Tests for decorator can_manage_rights_for_topic."""

    banned_user = 'banneduser'
    banned_user_email = 'banned@example.com'
    topic_id = 'topic_1'

    class
MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_manage_rights_for_topic def get(self, topic_id): self.render_json({'topic_id': topic_id}) def setUp(self): super(ManageRightsForTopicTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) self.mark_user_banned(self.banned_user) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_manage_rights_for_topic/<topic_id>', self.MockHandler)], debug=feconf.DEBUG, )) topic_services.create_new_topic_rights(self.topic_id, self.admin_id) def test_admin_can_manage_rights(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_manage_rights_for_topic/%s' % self.topic_id) self.logout() def test_banned_user_cannot_manage_rights(self): self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_rights_for_topic/%s' % self.topic_id, expected_status_int=401) self.assertIn( 'does not have enough rights to assign roles for the topic.', response['error']) self.logout() def test_guest_cannot_manage_rights(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_manage_rights_for_topic/%s' % self.topic_id, expected_status_int=401) self.assertEqual( response['error'], 'You must be logged in to access this resource.') class ChangeTopicPublicationStatusTests(test_utils.GenericTestBase): """Tests for decorator can_change_topic_publication_status.""" banned_user = 'banneduser' banned_user_email = 
'banned@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'topic_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_change_topic_publication_status def get(self, topic_id): self.render_json({ topic_id: topic_id }) def setUp(self): super(ChangeTopicPublicationStatusTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.banned_user_email, self.banned_user) self.mark_user_banned(self.banned_user) self.topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( self.topic_id, self.admin_id, name='Name1', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_change_publication_status/<topic_id>', self.MockHandler)], debug=feconf.DEBUG, )) def test_admin_can_change_topic_publication_status(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_change_publication_status/%s' % self.topic_id) self.logout() def test_can_not_change_topic_publication_status_with_invalid_topic_id( self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_change_publication_status/invalid_topic_id', expected_status_int=404) self.logout() def test_banned_user_cannot_change_topic_publication_status(self): self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_change_publication_status/%s' % self.topic_id, expected_status_int=401) self.assertIn( 'does not 
have enough rights to publish or unpublish the ' 'topic.', response['error']) self.logout() def test_guest_cannot_change_topic_publication_status(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_change_publication_status/%s' % self.topic_id, expected_status_int=401) self.assertEqual( response['error'], 'You must be logged in to access this resource.') class PerformTasksInTaskqueueTests(test_utils.GenericTestBase): """Tests for decorator can_perform_tasks_in_taskqueue.""" viewer_username = 'viewer' viewer_email = 'viewer@example.com' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_perform_tasks_in_taskqueue def get(self): self.render_json({}) def setUp(self): super(PerformTasksInTaskqueueTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.admin = user_services.get_user_actions_info(self.admin_id) self.signup(self.viewer_email, self.viewer_username) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_perform_tasks_in_taskqueue', self.MockHandler)], debug=feconf.DEBUG, )) def test_super_admin_can_perform_tasks_in_taskqueue(self): self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True) with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_perform_tasks_in_taskqueue') self.logout() def test_normal_user_cannot_perform_tasks_in_taskqueue(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_perform_tasks_in_taskqueue', expected_status_int=401) self.assertEqual( response['error'], 'You do not have the credentials to access this page.') self.logout() def test_request_with_appropriate_header_can_perform_tasks_in_taskqueue( self): with self.swap(self, 'testapp', 
                self.mock_testapp):
            self.get_json(
                '/mock_perform_tasks_in_taskqueue',
                headers={'X-AppEngine-QueueName': 'name'})


class PerformCronTaskTests(test_utils.GenericTestBase):
    """Tests for decorator can_perform_cron_tasks."""

    viewer_username = 'viewer'
    viewer_email = 'viewer@example.com'

    class MockHandler(base.BaseHandler):
        # Minimal JSON handler guarded by the decorator under test.
        GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
        URL_PATH_ARGS_SCHEMAS = {}
        HANDLER_ARGS_SCHEMAS = {'GET': {}}

        @acl_decorators.can_perform_cron_tasks
        def get(self):
            self.render_json({})

    def setUp(self):
        super(PerformCronTaskTests, self).setUp()
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.signup(self.viewer_email, self.viewer_username)
        self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
            [webapp2.Route('/mock_perform_cron_task', self.MockHandler)],
            debug=feconf.DEBUG,
        ))

    def test_super_admin_can_perform_cron_tasks(self):
        self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True)
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json('/mock_perform_cron_task')
        self.logout()

    def test_normal_user_cannot_perform_cron_tasks(self):
        self.login(self.viewer_email)
        with self.swap(self, 'testapp', self.mock_testapp):
            response = self.get_json(
                '/mock_perform_cron_task', expected_status_int=401)
            self.assertEqual(
                response['error'],
                'You do not have the credentials to access this page.')
        self.logout()

    def test_request_with_appropriate_header_can_perform_cron_tasks(self):
        # Requests carrying the X-AppEngine-Cron header are accepted even
        # without a logged-in user, per this test's expectation.
        with self.swap(self, 'testapp', self.mock_testapp):
            self.get_json(
                '/mock_perform_cron_task',
                headers={'X-AppEngine-Cron': 'true'})


class EditSkillDecoratorTests(test_utils.GenericTestBase):
    """Tests permissions for accessing the skill editor."""

    second_admin_username = 'adm2'
    second_admin_email = 'adm2@example.com'
    manager_username =
'topicmanager' manager_email = 'topicmanager@example.com' viewer_username = 'viewer' viewer_email = 'viewer@example.com' skill_id = '1' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'skill_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_edit_skill def get(self, skill_id): self.render_json({'skill_id': skill_id}) def setUp(self): super(EditSkillDecoratorTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.second_admin_email, self.second_admin_username) self.signup(self.manager_email, self.manager_username) self.signup(self.viewer_email, self.viewer_username) self.set_curriculum_admins( [self.CURRICULUM_ADMIN_USERNAME, self.second_admin_username]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.second_admin_id = self.get_user_id_from_email( self.second_admin_email) self.manager_id = self.get_user_id_from_email(self.manager_email) self.admin = user_services.get_user_actions_info(self.admin_id) self.manager = user_services.get_user_actions_info(self.manager_id) self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( 1, 'Subtopic Title 1') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( self.topic_id, self.admin_id, name='Name', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic_1], next_subtopic_id=2) self.set_topic_managers([self.manager_username], self.topic_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock_edit_skill/<skill_id>', self.MockHandler)], debug=feconf.DEBUG, )) def test_cannot_edit_skill_with_invalid_skill_id(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_custom_response( 
'/mock_edit_skill/', 'text/plain', expected_status_int=404) self.logout() def test_admin_can_edit_skill(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_skill/%s' % self.skill_id) self.assertEqual(response['skill_id'], self.skill_id) self.logout() def test_admin_can_edit_other_public_skill(self): self.login(self.second_admin_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_skill/%s' % self.skill_id) self.assertEqual(response['skill_id'], self.skill_id) self.logout() def test_topic_manager_can_edit_public_skill(self): self.login(self.manager_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_skill/%s' % self.skill_id) self.assertEqual(response['skill_id'], self.skill_id) self.logout() def test_normal_user_can_not_edit_public_skill(self): self.login(self.viewer_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_skill/%s' % self.skill_id, expected_status_int=401) class EditQuestionDecoratorTests(test_utils.GenericTestBase): """Tests the decorator can_edit_question.""" question_id = 'question_id' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'question_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_edit_question def get(self, question_id): self.render_json({'question_id': question_id}) def setUp(self): super(EditQuestionDecoratorTests, self).setUp() self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup('a@example.com', 'A') self.signup('b@example.com', 'B') self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.user_id_admin = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) self.user_id_a = 
self.get_user_id_from_email('a@example.com') self.user_id_b = self.get_user_id_from_email('b@example.com') self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.manager_id = self.get_user_id_from_email('a@example.com') self.question_id = 'question_id' self.topic_id = topic_fetchers.get_new_topic_id() subtopic_1 = topic_domain.Subtopic.create_default_subtopic( 1, 'Subtopic Title 1') subtopic_1.skill_ids = ['skill_id_1'] subtopic_1.url_fragment = 'sub-one-frag' self.save_new_topic( self.topic_id, self.admin_id, name='Name', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic_1], next_subtopic_id=2) self.save_new_question( self.question_id, self.owner_id, self._create_valid_question_data('ABC'), ['skill_1']) self.set_topic_managers( [user_services.get_username(self.user_id_a)], self.topic_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_edit_question/<question_id>', self.MockHandler)], debug=feconf.DEBUG, )) def test_guest_cannot_edit_question(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_question/%s' % self.question_id, expected_status_int=401) self.assertEqual( response['error'], 'You must be logged in to access this resource.') def test_cannot_edit_question_with_invalid_question_id(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_question/invalid_question_id', expected_status_int=404) self.logout() def test_admin_can_edit_question(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_question/%s' % self.question_id) self.assertEqual(response['question_id'], self.question_id) self.logout() def test_topic_manager_can_edit_question(self): self.login('a@example.com') 
with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_question/%s' % self.question_id) self.assertEqual(response['question_id'], self.question_id) self.logout() def test_any_user_cannot_edit_question(self): self.login('b@example.com') with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_question/%s' % self.question_id, expected_status_int=401) self.logout() class PlayQuestionDecoratorTests(test_utils.GenericTestBase): """Tests the decorator can_play_question.""" question_id = 'question_id' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'question_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_play_question def get(self, question_id): self.render_json({'question_id': question_id}) def setUp(self): super(PlayQuestionDecoratorTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_play_question/<question_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_question( self.question_id, self.owner_id, self._create_valid_question_data('ABC'), ['skill_1']) def test_can_play_question_with_valid_question_id(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_play_question/%s' % ( self.question_id)) self.assertEqual(response['question_id'], self.question_id) class PlayEntityDecoratorTests(test_utils.GenericTestBase): """Test the decorator can_play_entity.""" user_email = 'user@example.com' username = 'user' published_exp_id = 'exp_id_1' private_exp_id = 'exp_id_2' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'entity_type': { 'schema': { 'type': 'basestring' } }, 'entity_id': { 'schema': { 'type': 'basestring' } 
} } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_play_entity def get(self, entity_type, entity_id): self.render_json( {'entity_type': entity_type, 'entity_id': entity_id}) def setUp(self): super(PlayEntityDecoratorTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_play_entity/<entity_type>/<entity_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.question_id = question_services.get_new_question_id() self.save_new_question( self.question_id, self.owner_id, self._create_valid_question_data('ABC'), ['skill_1']) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) def test_cannot_play_exploration_on_disabled_exploration_ids(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404) def test_guest_can_play_exploration_on_published_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, self.published_exp_id)) self.assertEqual( response['entity_type'], feconf.ENTITY_TYPE_EXPLORATION) self.assertEqual( response['entity_id'], self.published_exp_id) def test_guest_cannot_play_exploration_on_private_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, self.private_exp_id), 
expected_status_int=404) def test_cannot_play_exploration_with_none_exploration_rights(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_play_entity/%s/%s' % (feconf.ENTITY_TYPE_EXPLORATION, 'fake_exp_id'), expected_status_int=404) def test_can_play_question_for_valid_question_id(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_QUESTION, self.question_id)) self.assertEqual( response['entity_type'], feconf.ENTITY_TYPE_QUESTION) self.assertEqual(response['entity_id'], self.question_id) self.assertEqual(response['entity_type'], 'question') def test_cannot_play_question_invalid_question_id(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( feconf.ENTITY_TYPE_QUESTION, 'question_id'), expected_status_int=404) def test_cannot_play_entity_for_invalid_entity(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_play_entity/%s/%s' % ( 'fake_entity_type', 'fake_entity_id'), expected_status_int=404) class EditEntityDecoratorTests(test_utils.GenericTestBase): username = 'banneduser' user_email = 'user@example.com' published_exp_id = 'exp_0' private_exp_id = 'exp_1' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'entity_type': { 'schema': { 'type': 'basestring' } }, 'entity_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_edit_entity def get(self, entity_type, entity_id): return self.render_json( {'entity_type': entity_type, 'entity_id': entity_id}) def setUp(self): super(EditEntityDecoratorTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) 
self.signup(self.BLOG_ADMIN_EMAIL, self.BLOG_ADMIN_USERNAME) self.add_user_role( self.BLOG_ADMIN_USERNAME, feconf.ROLE_ID_BLOG_ADMIN) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.mark_user_banned(self.username) self.owner = user_services.get_user_actions_info(self.owner_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/mock_edit_entity/<entity_type>/<entity_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.question_id = question_services.get_new_question_id() self.save_new_question( self.question_id, self.owner_id, self._create_valid_question_data('ABC'), ['skill_1']) self.save_new_valid_exploration( self.published_exp_id, self.owner_id) self.save_new_valid_exploration( self.private_exp_id, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id) def test_can_edit_exploration_with_valid_exp_id(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock_edit_entity/exploration/%s' % ( self.published_exp_id)) self.assertEqual( response['entity_type'], feconf.ENTITY_TYPE_EXPLORATION) self.assertEqual( response['entity_id'], self.published_exp_id) self.logout() def test_cannot_edit_exploration_with_invalid_exp_id(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_entity/exploration/invalid_exp_id', expected_status_int=404) self.logout() def test_banned_user_cannot_edit_exploration(self): self.login(self.user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_EXPLORATION, self.private_exp_id), expected_status_int=401) self.logout() def test_can_edit_question_with_valid_question_id(self): 
self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_QUESTION, self.question_id)) self.assertEqual(response['entity_id'], self.question_id) self.assertEqual(response['entity_type'], 'question') self.logout() def test_can_edit_topic(self): self.login(self.CURRICULUM_ADMIN_EMAIL) topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.admin_id, name='Name', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_TOPIC, topic_id)) self.assertEqual(response['entity_id'], topic_id) self.assertEqual(response['entity_type'], 'topic') self.logout() def test_cannot_edit_topic_with_invalid_topic_id(self): self.login(self.CURRICULUM_ADMIN_EMAIL) topic_id = 'incorrect_id' with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_TOPIC, topic_id), expected_status_int=404) self.logout() def test_can_edit_skill(self): self.login(self.CURRICULUM_ADMIN_EMAIL) skill_id = skill_services.get_new_skill_id() self.save_new_skill(skill_id, self.admin_id, description='Description') with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_SKILL, skill_id)) self.assertEqual(response['entity_id'], skill_id) self.assertEqual(response['entity_type'], 'skill') self.logout() def test_can_submit_images_to_questions(self): self.login(self.CURRICULUM_ADMIN_EMAIL) skill_id = skill_services.get_new_skill_id() self.save_new_skill(skill_id, self.admin_id, description='Description') with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, skill_id)) 
self.assertEqual(response['entity_id'], skill_id) self.assertEqual(response['entity_type'], 'question_suggestions') self.logout() def test_unauthenticated_users_cannot_submit_images_to_questions(self): skill_id = skill_services.get_new_skill_id() self.save_new_skill(skill_id, self.admin_id, description='Description') with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_edit_entity/%s/%s' % ( feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, skill_id), expected_status_int=401) def test_cannot_submit_images_to_questions_without_having_permissions(self): self.login(self.user_email) skill_id = skill_services.get_new_skill_id() self.save_new_skill(skill_id, self.admin_id, description='Description') with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS, skill_id), expected_status_int=401) self.assertEqual( response['error'], 'You do not have credentials to submit' ' images to questions.') self.logout() def test_can_edit_blog_post(self): self.login(self.BLOG_ADMIN_EMAIL) blog_admin_id = ( self.get_user_id_from_email(self.BLOG_ADMIN_EMAIL)) blog_post = blog_services.create_new_blog_post(blog_admin_id) blog_post_id = blog_post.id with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_BLOG_POST, blog_post_id)) self.assertEqual(response['entity_id'], blog_post_id) self.assertEqual(response['entity_type'], 'blog_post') self.logout() def test_can_edit_story(self): self.login(self.CURRICULUM_ADMIN_EMAIL) story_id = story_services.get_new_story_id() topic_id = topic_fetchers.get_new_topic_id() self.save_new_story(story_id, self.admin_id, topic_id) self.save_new_topic( topic_id, self.admin_id, name='Name', description='Description', canonical_story_ids=[story_id], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[], next_subtopic_id=1) with self.swap(self, 'testapp', self.mock_testapp): 
response = self.get_json('/mock_edit_entity/%s/%s' % ( feconf.ENTITY_TYPE_STORY, story_id)) self.assertEqual(response['entity_id'], story_id) self.assertEqual(response['entity_type'], 'story') self.logout() def test_cannot_edit_entity_invalid_entity(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json('/mock_edit_entity/%s/%s' % ( 'invalid_entity_type', 'q_id'), expected_status_int=404) class SaveExplorationTests(test_utils.GenericTestBase): """Tests for can_save_exploration decorator.""" role = rights_domain.ROLE_VOICE_ARTIST username = 'user' user_email = 'user@example.com' banned_username = 'banneduser' banned_user_email = 'banneduser@example.com' published_exp_id_1 = 'exp_1' published_exp_id_2 = 'exp_2' private_exp_id_1 = 'exp_3' private_exp_id_2 = 'exp_4' class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'exploration_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_save_exploration def get(self, exploration_id): self.render_json({'exploration_id': exploration_id}) def setUp(self): super(SaveExplorationTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME) self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup(self.user_email, self.username) self.signup(self.banned_user_email, self.banned_username) self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME) self.signup(self.VOICEOVER_ADMIN_EMAIL, self.VOICEOVER_ADMIN_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.voice_artist_id = self.get_user_id_from_email( self.VOICE_ARTIST_EMAIL) self.voiceover_admin_id = self.get_user_id_from_email( self.VOICEOVER_ADMIN_EMAIL) self.set_moderators([self.MODERATOR_USERNAME]) self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME]) self.mark_user_banned(self.banned_username) self.add_user_role( 
self.VOICEOVER_ADMIN_USERNAME, feconf.ROLE_ID_VOICEOVER_ADMIN) self.owner = user_services.get_user_actions_info(self.owner_id) self.voiceover_admin = user_services.get_user_actions_info( self.voiceover_admin_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/<exploration_id>', self.MockHandler)], debug=feconf.DEBUG, )) self.save_new_valid_exploration( self.published_exp_id_1, self.owner_id) self.save_new_valid_exploration( self.published_exp_id_2, self.owner_id) self.save_new_valid_exploration( self.private_exp_id_1, self.owner_id) self.save_new_valid_exploration( self.private_exp_id_2, self.owner_id) rights_manager.publish_exploration(self.owner, self.published_exp_id_1) rights_manager.publish_exploration(self.owner, self.published_exp_id_2) rights_manager.assign_role_for_exploration( self.voiceover_admin, self.published_exp_id_1, self.voice_artist_id, self.role) def test_unautheticated_user_cannot_save_exploration(self): with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.private_exp_id_1, expected_status_int=401) def test_can_not_save_exploration_with_invalid_exp_id(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/invalid_exp_id', expected_status_int=404) self.logout() def test_banned_user_cannot_save_exploration(self): self.login(self.banned_user_email) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.private_exp_id_1, expected_status_int=401) self.logout() def test_owner_can_save_exploration(self): self.login(self.OWNER_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() def test_moderator_can_save_public_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % 
self.published_exp_id_1) self.assertEqual(response['exploration_id'], self.published_exp_id_1) self.logout() def test_moderator_can_save_private_exploration(self): self.login(self.MODERATOR_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() def test_admin_can_save_private_exploration(self): self.login(self.CURRICULUM_ADMIN_EMAIL) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.private_exp_id_1) self.assertEqual(response['exploration_id'], self.private_exp_id_1) self.logout() def test_voice_artist_can_only_save_assigned_exploration(self): self.login(self.VOICE_ARTIST_EMAIL) # Checking voice artist can only save assigned public exploration. with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.published_exp_id_1) self.assertEqual(response['exploration_id'], self.published_exp_id_1) # Checking voice artist cannot save public exploration which he/she # is not assigned for. with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % self.published_exp_id_2, expected_status_int=401) self.logout() class OppiaMLAccessDecoratorTest(test_utils.GenericTestBase): """Tests for oppia_ml_access decorator.""" class MockHandler(base.OppiaMLVMHandler): REQUIRE_PAYLOAD_CSRF_CHECK = False GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'POST': { 'vm_id': { 'schema': { 'type': 'basestring' } }, 'message': { 'schema': { 'type': 'basestring' } }, 'signature': { 'schema': { 'type': 'basestring' } } } } def extract_request_message_vm_id_and_signature(self): """Returns message, vm_id and signature retrived from incoming request. Returns: tuple(str). Message at index 0, vm_id at index 1 and signature at index 2. 
""" signature = self.payload.get('signature') vm_id = self.payload.get('vm_id') message = self.payload.get('message') return classifier_domain.OppiaMLAuthInfo(message, vm_id, signature) @acl_decorators.is_from_oppia_ml def post(self): self.render_json({'job_id': 'new_job'}) def setUp(self): super(OppiaMLAccessDecoratorTest, self).setUp() self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/ml/nextjobhandler', self.MockHandler)], debug=feconf.DEBUG, )) def test_unauthorized_vm_cannot_fetch_jobs(self): payload = {} payload['vm_id'] = 'fake_vm' secret = 'fake_secret' payload['message'] = json.dumps('malicious message') payload['signature'] = classifier_services.generate_signature( secret.encode('utf-8'), payload['message'].encode('utf-8'), payload['vm_id']) with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/ml/nextjobhandler', payload, expected_status_int=401) def test_default_vm_id_raises_exception_in_prod_mode(self): payload = {} payload['vm_id'] = feconf.DEFAULT_VM_ID secret = feconf.DEFAULT_VM_SHARED_SECRET payload['message'] = json.dumps('malicious message') payload['signature'] = classifier_services.generate_signature( secret.encode('utf-8'), payload['message'].encode('utf-8'), payload['vm_id']) with self.swap(self, 'testapp', self.mock_testapp): with self.swap(constants, 'DEV_MODE', False): self.post_json( '/ml/nextjobhandler', payload, expected_status_int=401) def test_that_invalid_signature_raises_exception(self): payload = {} payload['vm_id'] = feconf.DEFAULT_VM_ID secret = feconf.DEFAULT_VM_SHARED_SECRET payload['message'] = json.dumps('malicious message') payload['signature'] = classifier_services.generate_signature( secret.encode('utf-8'), 'message'.encode('utf-8'), payload['vm_id']) with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/ml/nextjobhandler', payload, expected_status_int=401) def test_that_no_excpetion_is_raised_when_valid_vm_access(self): payload = {} payload['vm_id'] = 
feconf.DEFAULT_VM_ID secret = feconf.DEFAULT_VM_SHARED_SECRET payload['message'] = json.dumps('message') payload['signature'] = classifier_services.generate_signature( secret.encode('utf-8'), payload['message'].encode('utf-8'), payload['vm_id']) with self.swap(self, 'testapp', self.mock_testapp): json_response = self.post_json('/ml/nextjobhandler', payload) self.assertEqual(json_response['job_id'], 'new_job') class DecoratorForUpdatingSuggestionTests(test_utils.GenericTestBase): """Tests for can_update_suggestion decorator.""" curriculum_admin_username = 'adn' curriculum_admin_email = 'admin@example.com' author_username = 'author' author_email = 'author@example.com' hi_language_reviewer = 'reviewer1@example.com' en_language_reviewer = 'reviewer2@example.com' username = 'user' user_email = 'user@example.com' TARGET_TYPE = 'exploration' exploration_id = 'exp_id' target_version_id = 1 change_dict = { 'cmd': 'add_written_translation', 'content_id': 'content', 'language_code': 'hi', 'content_html': '<p>old content html</p>', 'state_name': 'State 1', 'translation_html': '<p>Translation for content.</p>', 'data_format': 'html' } class MockHandler(base.BaseHandler): GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = { 'suggestion_id': { 'schema': { 'type': 'basestring' } } } HANDLER_ARGS_SCHEMAS = {'GET': {}} @acl_decorators.can_update_suggestion def get(self, suggestion_id): self.render_json({'suggestion_id': suggestion_id}) def setUp(self): super(DecoratorForUpdatingSuggestionTests, self).setUp() self.signup(self.author_email, self.author_username) self.signup(self.user_email, self.username) self.signup(self.curriculum_admin_email, self.curriculum_admin_username) self.signup(self.hi_language_reviewer, 'reviewer1') self.signup(self.en_language_reviewer, 'reviewer2') self.author_id = self.get_user_id_from_email(self.author_email) self.admin_id = self.get_user_id_from_email(self.curriculum_admin_email) self.hi_language_reviewer_id = 
self.get_user_id_from_email( self.hi_language_reviewer) self.en_language_reviewer_id = self.get_user_id_from_email( self.en_language_reviewer) self.admin = user_services.get_user_actions_info(self.admin_id) self.author = user_services.get_user_actions_info(self.author_id) user_services.add_user_role( self.admin_id, feconf.ROLE_ID_CURRICULUM_ADMIN) user_services.allow_user_to_review_translation_in_language( self.hi_language_reviewer_id, 'hi') user_services.allow_user_to_review_translation_in_language( self.en_language_reviewer_id, 'en') user_services.allow_user_to_review_question( self.hi_language_reviewer_id) self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route('/mock/<suggestion_id>', self.MockHandler)], debug=feconf.DEBUG, )) exploration = ( self.save_new_linear_exp_with_state_names_and_interactions( self.exploration_id, self.author_id, [ 'State 1', 'State 2', 'State 3'], ['TextInput'], category='Algebra')) self.old_content = state_domain.SubtitledHtml( 'content', '<p>old content html</p>').to_dict() exploration.states['State 1'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exploration.states['State 2'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exploration.states['State 3'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exp_services._save_exploration(self.author_id, exploration, '', []) # pylint: disable=protected-access rights_manager.publish_exploration(self.author, self.exploration_id) self.new_content = state_domain.SubtitledHtml( 'content', '<p>new content html</p>').to_dict() self.resubmit_change_content = state_domain.SubtitledHtml( 'content', '<p>resubmit change content html</p>').to_dict() self.save_new_skill('skill_123', self.admin_id) add_question_change_dict = { 'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION, 'question_dict': { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 
'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], 'inapplicable_skill_misconception_ids': ['skillid12345-1'] }, 'skill_id': 'skill_123', 'skill_difficulty': 0.3 } suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT, self.TARGET_TYPE, self.exploration_id, self.target_version_id, self.author_id, self.change_dict, '') suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_ADD_QUESTION, feconf.ENTITY_TYPE_SKILL, 'skill_123', feconf.CURRENT_STATE_SCHEMA_VERSION, self.author_id, add_question_change_dict, 'test description') suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT, feconf.ENTITY_TYPE_EXPLORATION, self.exploration_id, exploration.version, self.author_id, { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 2', 'old_value': self.old_content, 'new_value': self.new_content }, 'change to state 1') translation_suggestions = suggestion_services.get_submitted_suggestions( self.author_id, feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT) question_suggestions = suggestion_services.get_submitted_suggestions( self.author_id, feconf.SUGGESTION_TYPE_ADD_QUESTION) edit_state_suggestions = suggestion_services.get_submitted_suggestions( self.author_id, feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT) self.assertEqual(len(translation_suggestions), 1) self.assertEqual(len(question_suggestions), 1) self.assertEqual(len(edit_state_suggestions), 1) translation_suggestion = translation_suggestions[0] question_suggestion = question_suggestions[0] edit_state_suggestion = edit_state_suggestions[0] self.translation_suggestion_id = translation_suggestion.suggestion_id self.question_suggestion_id = question_suggestion.suggestion_id self.edit_state_suggestion_id = edit_state_suggestion.suggestion_id def test_authors_cannot_update_suggestion_that_they_created(self): self.login(self.author_email) with 
self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % self.translation_suggestion_id, expected_status_int=401) self.assertEqual( response['error'], 'The user, %s is not allowed to update self-created' 'suggestions.' % self.author_username) self.logout() def test_admin_can_update_any_given_translation_suggestion(self): self.login(self.curriculum_admin_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % self.translation_suggestion_id) self.assertEqual( response['suggestion_id'], self.translation_suggestion_id) self.logout() def test_admin_can_update_any_given_question_suggestion(self): self.login(self.curriculum_admin_email) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.question_suggestion_id) self.assertEqual(response['suggestion_id'], self.question_suggestion_id) self.logout() def test_reviewer_can_update_translation_suggestion(self): self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % self.translation_suggestion_id) self.assertEqual( response['suggestion_id'], self.translation_suggestion_id) self.logout() def test_reviewer_can_update_question_suggestion(self): self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json('/mock/%s' % self.question_suggestion_id) self.assertEqual( response['suggestion_id'], self.question_suggestion_id) self.logout() def test_guest_cannot_update_any_suggestion(self): with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % self.translation_suggestion_id, expected_status_int=401) self.assertEqual( response['error'], 'You must be logged in to access this resource.') def test_reviewers_without_permission_cannot_update_any_suggestion(self): self.login(self.en_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = 
self.get_json( '/mock/%s' % self.translation_suggestion_id, expected_status_int=401) self.assertEqual( response['error'], 'You are not allowed to update the suggestion.') self.logout() def test_suggestions_with_invalid_suggestion_id_cannot_be_updated(self): self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % 'suggestion-id', expected_status_int=400) self.assertEqual( response['error'], 'Invalid format for suggestion_id. ' + 'It must contain 3 parts separated by \'.\'') self.logout() def test_non_existent_suggestions_cannot_be_updated(self): self.login(self.hi_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): self.get_json( '/mock/%s' % 'exploration.exp1.' + 'WzE2MTc4NzExNzExNDEuOTE0XQ==WzQ5NTs', expected_status_int=404) self.logout() def test_not_allowed_suggestions_cannot_be_updated(self): self.login(self.en_language_reviewer) with self.swap(self, 'testapp', self.mock_testapp): response = self.get_json( '/mock/%s' % self.edit_state_suggestion_id, expected_status_int=400) self.assertEqual( response['error'], 'Invalid suggestion type.') self.logout() class OppiaAndroidDecoratorTest(test_utils.GenericTestBase): """Tests for is_from_oppia_android decorator.""" class MockHandler(base.BaseHandler): REQUIRE_PAYLOAD_CSRF_CHECK = False GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON URL_PATH_ARGS_SCHEMAS = {} HANDLER_ARGS_SCHEMAS = { 'POST': { 'report': { 'schema': { 'type': 'object_dict', 'object_class': ( app_feedback_report_domain.AppFeedbackReport) } } } } @acl_decorators.is_from_oppia_android def post(self): return self.render_json({}) REPORT_JSON = { 'platform_type': 'android', 'android_report_info_schema_version': 1, 'app_context': { 'entry_point': { 'entry_point_name': 'navigation_drawer', 'entry_point_exploration_id': None, 'entry_point_story_id': None, 'entry_point_topic_id': None, 'entry_point_subtopic_id': None, }, 'text_size': 'large_text_size', 
'text_language_code': 'en', 'audio_language_code': 'en', 'only_allows_wifi_download_and_update': True, 'automatically_update_topics': False, 'account_is_profile_admin': False, 'event_logs': ['example', 'event'], 'logcat_logs': ['example', 'log'] }, 'device_context': { 'android_device_model': 'example_model', 'android_sdk_version': 23, 'build_fingerprint': 'example_fingerprint_id', 'network_type': 'wifi' }, 'report_submission_timestamp_sec': 1615519337, 'report_submission_utc_offset_hrs': 0, 'system_context': { 'platform_version': '0.1-alpha-abcdef1234', 'package_version_code': 1, 'android_device_country_locale_code': 'in', 'android_device_language_locale_code': 'en' }, 'user_supplied_feedback': { 'report_type': 'suggestion', 'category': 'language_suggestion', 'user_feedback_selected_items': [], 'user_feedback_other_text_input': 'french' } } ANDROID_APP_VERSION_NAME = '1.0.0-flavor-commithash' ANDROID_APP_VERSION_CODE = '2' def setUp(self): super(OppiaAndroidDecoratorTest, self).setUp() self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication( [webapp2.Route( '/appfeedbackreporthandler/incoming_android_report', self.MockHandler)], debug=feconf.DEBUG, )) def test_that_no_exception_is_raised_when_valid_oppia_android_headers(self): headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': ( android_validation_constants.ANDROID_APP_PACKAGE_NAME), 'app_version_name': self.ANDROID_APP_VERSION_NAME, 'app_version_code': self.ANDROID_APP_VERSION_CODE } payload = {} payload['report'] = self.REPORT_JSON with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/appfeedbackreporthandler/incoming_android_report', payload, headers=headers) def test_invalid_api_key_raises_exception(self): invalid_headers = { 'api_key': 'bad_key', 'app_package_name': ( android_validation_constants.ANDROID_APP_PACKAGE_NAME), 'app_version_name': self.ANDROID_APP_VERSION_NAME, 'app_version_code': self.ANDROID_APP_VERSION_CODE } payload = {} 
payload['report'] = self.REPORT_JSON with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401) def test_invalid_package_name_raises_exception(self): invalid_headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': 'bad_package_name', 'app_version_name': self.ANDROID_APP_VERSION_NAME, 'app_version_code': self.ANDROID_APP_VERSION_CODE } payload = {} payload['report'] = self.REPORT_JSON with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401) def test_invalid_version_name_raises_exception(self): invalid_headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': ( android_validation_constants.ANDROID_APP_PACKAGE_NAME), 'app_version_name': 'bad_version_name', 'app_version_code': self.ANDROID_APP_VERSION_CODE } payload = {} payload['report'] = self.REPORT_JSON with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401) def test_invalid_version_code_raises_exception(self): invalid_headers = { 'api_key': android_validation_constants.ANDROID_API_KEY, 'app_package_name': ( android_validation_constants.ANDROID_APP_PACKAGE_NAME), 'app_version_name': self.ANDROID_APP_VERSION_NAME, 'app_version_code': 'bad_version_code' } payload = {} payload['report'] = self.REPORT_JSON with self.swap(self, 'testapp', self.mock_testapp): self.post_json( '/appfeedbackreporthandler/incoming_android_report', payload, headers=invalid_headers, expected_status_int=401)
40.2051
134
0.65012
ace81d70db1c96576b1367c8ca45cd8a313e2029
2,533
py
Python
gamestonk_terminal/cryptocurrency/nft/nftcalendar_model.py
GarnixJu2015/GamestonkTerminal
ec400e46ddce4ac934af836b863528f14a13d865
[ "MIT" ]
1
2022-01-18T00:49:22.000Z
2022-01-18T00:49:22.000Z
gamestonk_terminal/cryptocurrency/nft/nftcalendar_model.py
rikokir/GamestonkTerminal
599fadd07b1fb1589c6bc09f1671cb3d9985697e
[ "MIT" ]
1
2022-01-15T01:24:24.000Z
2022-01-15T01:24:24.000Z
gamestonk_terminal/cryptocurrency/nft/nftcalendar_model.py
rikokir/GamestonkTerminal
599fadd07b1fb1589c6bc09f1671cb3d9985697e
[ "MIT" ]
1
2021-11-07T20:59:25.000Z
2021-11-07T20:59:25.000Z
""" nftcalendar.io Model """ __docformat__ = "numpy" import pandas as pd import requests from bs4 import BeautifulSoup from gamestonk_terminal.helper_funcs import get_user_agent def get_nft_drops(url: str) -> pd.DataFrame: """Get NFT drops [Source: nftcalendar.io] Parameters ------- url : str url to get NFT drops from Returns ------- pd.DataFrame NFT drops """ nft_calendar = BeautifulSoup( requests.get( url, headers={"User-Agent": get_user_agent()}, ).text, "lxml", ) drop_titles = list() for droptitle in nft_calendar.find_all("h2", {"class": "text-2xl"}): drop_titles.append(droptitle.text.strip()) drop_descriptions = list() for dropdesc in nft_calendar.find_all( "div", {"class": "pb-0 md:pb-2 text-normal text-black dark:text-yellow-50"} ): drop_descriptions.append(dropdesc.text.strip()) drop_dates = list() for dropdate in nft_calendar.find_all( "div", {"class": "py-2 text-normal text-black dark:text-yellow-50 md:text-lg"} ): drop_dates.append(dropdate.text.strip().replace("\n", " ")) base_url = "https://nftcalendar.io" drop_readmores = list() for readmore in nft_calendar.find_all( "div", {"class": "pt-4 pb-0 md:pt-2 md:pb-2 text-right md:text-left"} ): drop_readmores.append(base_url + readmore.find("a")["href"]) return pd.DataFrame( list(zip(drop_titles, drop_dates, drop_readmores, drop_descriptions)), columns=["Title", "Dates", "Link", "Description"], ) def get_nft_today_drops() -> pd.DataFrame: """Get NFT today drops [Source: nftcalendar.io] Returns ------- pd.DataFrame NFT drops """ return get_nft_drops("https://nftcalendar.io/") def get_nft_upcoming_drops() -> pd.DataFrame: """Get NFT upcoming drops [Source: nftcalendar.io] Returns ------- pd.DataFrame NFT drops """ return get_nft_drops("https://nftcalendar.io/events") def get_nft_ongoing_drops() -> pd.DataFrame: """Get NFT ongoing drops [Source: nftcalendar.io] Returns ------- pd.DataFrame NFT drops """ return get_nft_drops("https://nftcalendar.io/events/ongoing/") def get_nft_newest_drops() -> pd.DataFrame: 
"""Get NFT newest drops [Source: nftcalendar.io] Returns ------- pd.DataFrame NFT drops """ return get_nft_drops("https://nftcalendar.io/events/newest/")
25.079208
86
0.621003
ace81dcfe288eba67647515ed83b993415c2febf
2,266
py
Python
python/modified/facedetect2.py
DiUS/Physiognomy
e3fea173e25e221ee38fe38b040b0c70e2583bfa
[ "MIT" ]
8
2015-09-13T08:09:57.000Z
2021-11-08T10:53:29.000Z
python/modified/facedetect2.py
DiUS/Physiognomy
e3fea173e25e221ee38fe38b040b0c70e2583bfa
[ "MIT" ]
null
null
null
python/modified/facedetect2.py
DiUS/Physiognomy
e3fea173e25e221ee38fe38b040b0c70e2583bfa
[ "MIT" ]
4
2017-10-11T07:39:17.000Z
2021-12-06T11:12:29.000Z
import numpy as np import cv2 import cv2.cv as cv from video import create_capture from common import clock, draw_str help_message = ''' USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>] ''' def detect(img, cascade): rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE) if len(rects) == 0: return [] rects[:,2:] += rects[:,:2] return rects def draw_rects(img, rects, color): for x1, y1, x2, y2 in rects: cv2.rectangle(img, (x1, y1), (x2, y2), color, 2) if __name__ == '__main__': import sys, getopt print help_message args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade=']) try: video_src = video_src[0] except: video_src = 'synth:bg=../cpp/lena.jpg:noise=0.05' args = dict(args) cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml") nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml") smiley_fn = args.get('--smile-cascade', "../../data/haarcascades/haarcascade_mcs_mouth.xml") cascade = cv2.CascadeClassifier(cascade_fn) nested = cv2.CascadeClassifier(nested_fn) smiley = cv2.CascadeClassifier(smiley_fn) cam = create_capture(video_src) while True: ret, img = cam.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) t = clock() rects = detect(gray, cascade) vis = img.copy() draw_rects(vis, rects, (0, 255, 0)) for x1, y1, x2, y2 in rects: roi = gray[y1:y2, x1:x2] vis_roi = vis[y1:y2, x1:x2] subrects = detect(roi.copy(), nested) draw_rects(vis_roi, subrects, (255, 0, 0)) y3 = y2/2 + y1/2 roi2 = gray[y3:y2, x1:x2] subrects2 = detect(roi.copy(), smiley) draw_rects(vis_roi, subrects2, (0, 0, 255)) dt = clock() - t draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000)) cv2.imshow('facedetect', vis) if cv2.waitKey(5) == 27: break
34.333333
125
0.585172
ace81e77ae9536b1c539920e7bfe66adb703d0b2
2,484
py
Python
Source Code/Python API/set_burst.py
D-TACQ/acq400_lv
684e06a294ceb865511a7568e5038b209bdc3374
[ "MIT" ]
null
null
null
Source Code/Python API/set_burst.py
D-TACQ/acq400_lv
684e06a294ceb865511a7568e5038b209bdc3374
[ "MIT" ]
2
2018-04-23T16:37:19.000Z
2018-07-11T10:51:19.000Z
Source Code/Python API/set_burst.py
D-TACQ/acq400_lv
684e06a294ceb865511a7568e5038b209bdc3374
[ "MIT" ]
3
2018-04-20T11:53:29.000Z
2018-04-25T15:25:55.000Z
#!/usr/bin/env python """ set burst mode run_gpg.py [opts] uut """ import sys import acq400_hapi import argparse import re def configure_bm(args): uuts = [acq400_hapi.Acq400(u) for u in args.uuts] for u in uuts: u.s0.trace = args.trace u.s1.trace = args.trace u.s0.GPG_ENABLE = '0' # needed if running set.burst multiple times u.clear_counters() # makes COUNTERS opi easier to read u.s0.transient = 'POST={}'.format(args.post) u.s1.trg = args.trg u.s1.RGM = args.rgm u.s1.RGM_DX = args.dx u.s1.RGM_SENSE = args.sense u.s1.RTM_TRANSLEN = args.rtm_translen if args.rgm == 'RTM' else 0 u.s1.es_enable = args.es_enable u.s0.set_knob('SIG_SRC_TRG_1', 'GPG1' if args.gpg == 'on' and args.dx == 'd1' else 'STRIG') u.s0.set_knob('SIG_SRC_TRG_0', 'GPG0' if args.gpg == 'on' and args.dx == 'd0' else 'HDMI_TRG' if args.hdmi_slave == 'yes' else 'EXT') u.s0.set_arm = 1 for u in uuts: u.statmon.wait_armed() # warning: this is a RACE for the case of a free-running trigger and multiple UUTs if args.gpg == 'on': for u in uuts: u.s0.GPG_ENABLE = '1' if args.trg == '1,1,1': for u in uuts: u.s0.soft_trigger def run_main(): parser = argparse.ArgumentParser(description='set_burst mode') parser.add_argument('--rgm', default='RTM', type=str, help="mode RGM|RTM") parser.add_argument('--dx', default='d0', type=str, help='dx d0|d1|d2') parser.add_argument('--gpg', default='off', type=str, help='source from gpg on|off') parser.add_argument('--sense', default='rising', type=str, help='rising|falling') parser.add_argument('--rtm_translen', default=1234, type=int, help='transient length') parser.add_argument('--post', default=100000, type=int, help='shot length') parser.add_argument('--trg', default='1,0,1', type=str, help='shot trigger triplet') parser.add_argument('--hdmi_slave', default='no', type=str, help='no: use FPTRG, yes: use HDMI trg on d0') parser.add_argument('--es_enable', default=1, type=int, help='0 disables Event Signature') parser.add_argument('--trace', default=0, type=int, help='1: 
enable command trace') parser.add_argument('uuts', nargs='+', help="uut") configure_bm(parser.parse_args()) # execution starts here if __name__ == '__main__': run_main()
37.636364
141
0.623188
ace81eb32a0d08100791e34df3ab757ea7559c56
1,079
py
Python
bin/Mode.py
trevor-wu/e-mission-server
2e31986bd7c0faab7110b7eb69541b0b9eac62df
[ "BSD-3-Clause" ]
21
2015-02-09T00:35:17.000Z
2021-12-14T16:41:05.000Z
bin/Mode.py
trevor-wu/e-mission-server
2e31986bd7c0faab7110b7eb69541b0b9eac62df
[ "BSD-3-Clause" ]
672
2015-01-29T18:10:56.000Z
2022-03-24T13:04:51.000Z
bin/Mode.py
trevor-wu/e-mission-server
2e31986bd7c0faab7110b7eb69541b0b9eac62df
[ "BSD-3-Clause" ]
110
2015-01-29T18:11:10.000Z
2022-03-29T17:58:14.000Z
from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import * __author__ = 'Yin' from pymongo import MongoClient from get_database import get_mode_db, get_section_db, get_trip_db Modes=get_mode_db() for items in Modes.find(): Modes.remove() modes_todo={ 'mode_id': 1,'mode_name':'walking'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 2,'mode_name':'running'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 3,'mode_name':'cycling'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 4,'mode_name':'transport'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 5,'mode_name':'bus'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 6,'mode_name':'train'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 7,'mode_name':'car'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 8,'mode_name':'mixed'} Modes.insert(modes_todo) modes_todo={ 'mode_id': 9,'mode_name':'air'} Modes.insert(modes_todo)
32.69697
65
0.778499
ace81f66073648c6320135b2938db7e0d79984f6
804
py
Python
tests/counterfit/frameworks/textattack/test_textattack.py
sebszyller/counterfit
e3075b76e3616f2cf2e7767152d1be4852b4a213
[ "MIT" ]
495
2021-05-03T17:11:52.000Z
2022-03-31T19:22:40.000Z
tests/counterfit/frameworks/textattack/test_textattack.py
sebszyller/counterfit
e3075b76e3616f2cf2e7767152d1be4852b4a213
[ "MIT" ]
25
2021-05-06T00:07:19.000Z
2022-03-31T12:16:25.000Z
tests/counterfit/frameworks/textattack/test_textattack.py
sebszyller/counterfit
e3075b76e3616f2cf2e7767152d1be4852b4a213
[ "MIT" ]
79
2021-05-03T21:31:44.000Z
2022-03-13T14:42:08.000Z
import pytest from counterfit.core.state import CFState from counterfit.core.utils import set_id class TestTextAttackFramework: @pytest.fixture(params=["movie_reviews"]) def target(self, request): yield request.param @pytest.fixture(scope='function') def cfstate_state(self, target): cfstate = CFState.state() cfstate._init_state() cfstate.load_framework("textattack") cfstate.load_target(target) return cfstate def test_build_run(self, cfstate_state, target): scan_id = set_id() cfattack_id = cfstate_state.build_new_attack(target, "DeepWordBugGao2018", scan_id) assert cfattack_id is not None attack_complete = cfstate_state.run_attack(target, cfattack_id) assert attack_complete is True
29.777778
91
0.707711
ace81f6626f2acfe08ce489e88ea6fa35f04a8df
2,867
py
Python
brownie_tokens/template.py
samwerner/brownie-token-tester
a0887111ae131a220d8b4b5e71a676752938bf64
[ "MIT" ]
26
2020-12-12T18:29:43.000Z
2022-02-23T16:00:10.000Z
brownie_tokens/template.py
samwerner/brownie-token-tester
a0887111ae131a220d8b4b5e71a676752938bf64
[ "MIT" ]
2
2021-05-26T22:11:53.000Z
2022-02-14T17:42:33.000Z
brownie_tokens/template.py
samwerner/brownie-token-tester
a0887111ae131a220d8b4b5e71a676752938bf64
[ "MIT" ]
5
2021-01-22T17:58:44.000Z
2021-06-21T14:26:56.000Z
from brownie import Contract, compile_source from brownie.network.account import Account from pathlib import Path from typing import Dict, Union RETURN_TYPE: Dict = { True: " -> bool", False: " -> bool", None: "", } RETURN_STATEMENT: Dict = { True: "return True", False: "return False", None: "return", } FAIL_STATEMENT: Dict = { "revert": "raise", True: "return True", False: "return False", None: "return", } STRING_CONVERT: Dict = { "true": True, "false": False, "none": None, } with Path(__file__).parent.joinpath("token-template.vy").open() as fp: TEMPLATE = fp.read() def ERC20( name: str = "Test Token", symbol: str = "TST", decimals: int = 18, success: Union[bool, str, None] = True, fail: Union[bool, str, None] = "revert", deployer: Union[Account, str, None] = None, ) -> Contract: """ Deploy an ERC20 contract for testing purposes. Arguments --------- name : str, optional Full name of the token. symbol: str, optional Short symbol for the token. decimals : int, optional Number of token decimal places. success : bool, optional Value returned upon successful transfer or approval. fail : bool | str, optional Value or action upon failed transfer or approval. Use "revert" to make the transaction revert. deployer: Account | str, optional Address to deploy the contract from. 
Returns ------- Contract Deployed ERC20 contract """ # understand success and fail when given as strings if isinstance(success, str) and success.lower() in STRING_CONVERT: success = STRING_CONVERT[success.lower()] if isinstance(fail, str) and fail.lower() in STRING_CONVERT: fail = STRING_CONVERT[fail.lower()] if success not in RETURN_STATEMENT: valid_keys = [str(i) for i in RETURN_STATEMENT.keys()] raise ValueError(f"Invalid value for `success`, valid options are: {', '.join(valid_keys)}") if fail not in FAIL_STATEMENT: valid_keys = [str(i) for i in FAIL_STATEMENT.keys()] raise ValueError(f"Invalid value for `fail`, valid options are: {', '.join(valid_keys)}") if None in (fail, success) and fail is not success and fail != "revert": raise ValueError("Cannot use `None` for only one of `success` and `fail`.") source = TEMPLATE.format( return_type=RETURN_TYPE[success], return_statement=RETURN_STATEMENT[success], fail_statement=FAIL_STATEMENT[fail], ) contract = compile_source(source, vyper_version="0.2.12").Vyper if deployer is None: tx_params = {"from": "0x0000000000000000000000000000000000001337", "silent": True} else: tx_params = {"from": deployer} return contract.deploy(name, symbol, decimals, tx_params)
30.5
100
0.645274
ace820f40185e8e3a74c82b8f7a4f2ffd8cc0e06
1,098
py
Python
packages/jet_bridge_base/jet_bridge_base/views/external_auth/login.py
F2210/jet-bridge
72b1af5cd7df585a4026d65170d3607f8cdf6bea
[ "MIT" ]
1,247
2019-01-10T22:22:08.000Z
2022-03-29T20:54:32.000Z
packages/jet_bridge_base/jet_bridge_base/views/external_auth/login.py
F2210/jet-bridge
72b1af5cd7df585a4026d65170d3607f8cdf6bea
[ "MIT" ]
12
2019-03-15T20:06:14.000Z
2022-01-07T10:28:20.000Z
packages/jet_bridge_base/jet_bridge_base/views/external_auth/login.py
F2210/jet-bridge
72b1af5cd7df585a4026d65170d3607f8cdf6bea
[ "MIT" ]
130
2019-02-26T17:36:53.000Z
2022-03-17T22:46:27.000Z
from social_core.actions import do_auth from jet_bridge_base.configuration import configuration from jet_bridge_base.external_auth.mixin import ExternalAuthMixin from jet_bridge_base.views.base.api import BaseAPIView AUTH_URI_KEY = 'auth' PROJECT_KEY = 'project' REDIRECT_URI_KEY = 'redirect_uri' class ExternalAuthLoginView(ExternalAuthMixin, BaseAPIView): def get(self, request, *args, **kwargs): app = kwargs.get('app') return self._auth(request, app) def post(self, request, *args, **kwargs): app = kwargs.get('app') return self._auth(request, app) def _auth(self, request, app): auth_uri = request.get_argument('auth_uri', None) project = request.get_argument('project', None) redirect_uri = request.get_argument('redirect_uri', None) configuration.session_set(request, AUTH_URI_KEY, auth_uri) configuration.session_set(request, PROJECT_KEY, project) configuration.session_set(request, REDIRECT_URI_KEY, redirect_uri) self.init_auth(request, app) return do_auth(self.backend)
33.272727
74
0.724954
ace821426a77c83ec7764acdab842a9e128ca480
2,103
py
Python
tensorflow_datasets/scripts/documentation/build_api_docs.py
shubhamkumaR630/datasets
fe9ee91849cefed0953141ea3588f73b7def78fd
[ "Apache-2.0" ]
2
2022-02-14T09:51:39.000Z
2022-02-14T13:27:49.000Z
tensorflow_datasets/scripts/documentation/build_api_docs.py
shubhamkumaR630/datasets
fe9ee91849cefed0953141ea3588f73b7def78fd
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/scripts/documentation/build_api_docs.py
shubhamkumaR630/datasets
fe9ee91849cefed0953141ea3588f73b7def78fd
[ "Apache-2.0" ]
1
2020-12-13T22:11:33.000Z
2020-12-13T22:11:33.000Z
# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """generates api_docs for tensorflow_datasets.""" import os from absl import app from absl import flags import tensorflow_datasets as tfds from tensorflow_datasets import testing from tensorflow_docs.api_generator import generate_lib FLAGS = flags.FLAGS flags.DEFINE_string("output_dir", "/tmp/datasets_api", "Where to output the docs") flags.DEFINE_string("code_url_prefix", "https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets/", "The url prefix for links to code.") flags.DEFINE_bool("search_hints", True, "Include metadata search hints in the generated files") flags.DEFINE_string("site_path", "datasets/api_docs/python", "Path prefix in the _toc.yaml") def execute(output_dir, code_url_prefix, search_hints, site_path): """Builds API docs for tensorflow_datasets.""" # Internally, tfds.testing defaults to None. Fill it in here so that we get # documentation. tfds.testing = testing doc_generator = generate_lib.DocGenerator( root_title="TensorFlow Datasets", py_modules=[("tfds", tfds)], base_dir=os.path.dirname(tfds.__file__), search_hints=search_hints, code_url_prefix=code_url_prefix, site_path=site_path) doc_generator.build(output_dir) def main(unused_argv): execute(FLAGS.output_dir, FLAGS.code_url_prefix, FLAGS.search_hints, FLAGS.site_path) if __name__ == "__main__": app.run(main)
32.353846
94
0.727057
ace8218798c9c031a708f9278c89ce403a6c69b7
582
py
Python
monocliche/tests/actions/DrawCardActionTest.py
AndreaBiondaro/Monocliche
8b8a7a09ae7bd2acd32c6f124686b2dc3f21b6e3
[ "MIT" ]
1
2021-01-09T18:31:14.000Z
2021-01-09T18:31:14.000Z
monocliche/tests/actions/DrawCardActionTest.py
AndreaBiondaro/Monocliche
8b8a7a09ae7bd2acd32c6f124686b2dc3f21b6e3
[ "MIT" ]
12
2021-01-05T13:53:11.000Z
2021-02-09T20:01:40.000Z
monocliche/tests/actions/DrawCardActionTest.py
AndreaBiondaro/Monocliche
8b8a7a09ae7bd2acd32c6f124686b2dc3f21b6e3
[ "MIT" ]
1
2021-01-09T10:09:15.000Z
2021-01-09T10:09:15.000Z
import unittest from monocliche.src.Card import Card from monocliche.src.Deck import Deck from monocliche.src.actions.DrawCardAction import DrawCardAction class DrawCardActionTest(unittest.TestCase): def test_execute(self): cards = [Card('card1', '', None), Card('card2', '', None)] deck = Deck(cards) action = DrawCardAction(deck) card = action.execute(None) self.assertEqual('card1', card.title) card = action.execute(None) self.assertEqual('card2', card.title) if __name__ == '__main__': unittest.main()
21.555556
66
0.670103
ace822b9ccfaf4ddfbb047994528095e1d5eb98f
2,040
py
Python
pipeline.py
eunsu-park/DeepIRI
e81e69c679e86e2e265d06e9e8a41993321e2a2a
[ "MIT" ]
null
null
null
pipeline.py
eunsu-park/DeepIRI
e81e69c679e86e2e265d06e9e8a41993321e2a2a
[ "MIT" ]
null
null
null
pipeline.py
eunsu-park/DeepIRI
e81e69c679e86e2e265d06e9e8a41993321e2a2a
[ "MIT" ]
1
2021-07-26T07:57:23.000Z
2021-07-26T07:57:23.000Z
import torch from torch.utils import data import numpy as np import torchvision.transforms as T from glob import glob import os class Normalizer: def __call__(self, data): return (data/50.)-1. class DeNormalizer: def __call__(self, data): return (data+1.)*50. class SnapMaker: def __call__(self, data): tmp = (data+1.)*(255/2.) snap = np.clip(tmp, 0, 255).astype(np.uint8) return snap def get_transforms(): transforms = [] transforms.append(Normalizer()) transforms.append(T.ToTensor()) return T.Compose(transforms) class CustomDataset: def __init__(self, opt): self.is_train = opt.is_train if self.is_train == True : pattern_inp = os.path.join(opt.root_data, 'train', opt.name_inp, '*.npy') self.list_inp = sorted(glob(pattern_inp)) pattern_tar = os.path.join(opt.root_data, 'train', opt.name_tar, '*.npy') self.list_tar = sorted(glob(pattern_tar)) assert len(self.list_inp) == len(self.list_tar) self.nb_data = len(self.list_inp) else : pattern_inp = os.path.join(opt.root_data, 'test', opt.name_inp, '*.npy') self.list_inp = sorted(glob(pattern_inp)) self.nb_data = len(self.list_inp) self.norm = Normalizer() def __len__(self): return self.nb_data def __getitem__(self, idx): inp = np.load(self.list_inp[idx])[None,:,:] inp = self.norm(inp) inp = torch.from_numpy(inp) if self.is_train == True : tar = np.load(self.list_tar[idx])[None,:,:] tar = self.norm(tar) tar = torch.from_numpy(tar) else : tar = torch.zeros_like(inp, dtype=torch.float) return inp, tar def get_data_loader(opt): dataset = CustomDataset(opt) dataloader = data.DataLoader(dataset=dataset, batch_size=opt.batch_size, shuffle=opt.is_train, num_workers=opt.num_workers) return dataloader
32.380952
85
0.608824
ace822f6ea0bfa969517034ad2b723d9c529786f
1,611
py
Python
tests/API1/testutils.py
sdrees/param
7cb0678726f05f5c8470d29ad894c58757b2718e
[ "BSD-3-Clause" ]
90
2015-02-09T19:32:15.000Z
2018-12-29T17:07:17.000Z
tests/API1/testutils.py
sdrees/param
7cb0678726f05f5c8470d29ad894c58757b2718e
[ "BSD-3-Clause" ]
239
2015-01-14T11:32:47.000Z
2019-01-18T20:17:17.000Z
tests/API1/testutils.py
sdrees/param
7cb0678726f05f5c8470d29ad894c58757b2718e
[ "BSD-3-Clause" ]
21
2015-01-14T13:12:59.000Z
2018-09-18T02:28:32.000Z
import datetime as dt import param import pytest from param import guess_param_types try: import numpy as np except ImportError: np = None try: import pandas as pd except ImportError: pd = None now = dt.datetime.now() today = dt.date.today() guess_param_types_data = { 'Parameter': (param.Parameter(), param.Parameter), 'Date': (today, param.Date), 'Datetime': (now, param.Date), 'Boolean': (True, param.Boolean), 'Integer': (1, param.Integer), 'Number': (1.2, param.Number), 'String': ('test', param.String), 'Dict': (dict(a=1), param.Dict), 'NumericTuple': ((1, 2), param.NumericTuple), 'Tuple': (('a', 'b'), param.Tuple), 'DateRange': ((dt.date(2000, 1, 1), dt.date(2001, 1, 1)), param.DateRange), 'List': ([1, 2], param.List), 'Unsupported_None': (None, param.Parameter), } if np: guess_param_types_data.update({ 'Array':(np.ndarray([1, 2]), param.Array), }) if pd: guess_param_types_data.update({ 'DataFrame': (pd.DataFrame(data=dict(a=[1])), param.DataFrame), 'Series': (pd.Series([1, 2]), param.Series), }) @pytest.mark.parametrize('val,p', guess_param_types_data.values(), ids=guess_param_types_data.keys()) def test_guess_param_types(val, p): input = {'key': val} output = guess_param_types(**input) assert isinstance(output, dict) assert len(output) == 1 assert 'key' in output out_param = output['key'] assert isinstance(out_param, p) if not type(out_param) == param.Parameter: assert out_param.default is val assert out_param.constant
27.305085
101
0.637492
ace8250148e96c224c782a2849887c12595ae331
3,515
py
Python
projects/Helloworld/Helloworld/settings.py
Ankuraxz/Django-Practice
a2b302f8a99352da1d5e9ba028c0107b5dec2ca2
[ "MIT" ]
null
null
null
projects/Helloworld/Helloworld/settings.py
Ankuraxz/Django-Practice
a2b302f8a99352da1d5e9ba028c0107b5dec2ca2
[ "MIT" ]
null
null
null
projects/Helloworld/Helloworld/settings.py
Ankuraxz/Django-Practice
a2b302f8a99352da1d5e9ba028c0107b5dec2ca2
[ "MIT" ]
null
null
null
""" Django settings for Helloworld project. Generated by 'django-admin startproject' using Django 3.2.4. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import os from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent print(BASE_DIR) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-$x9-@9o@sqqtbyciehaud*_co3lfzqqy)&+m7p^=(6l(iu=!07' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'adio_web.apps.AdioWebConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'Helloworld.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'Helloworld.wsgi.application' # Database # 
https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'adio', 'USER': 'postgres', 'PASSWORD': 123, 'HOST': 'localhost' } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static') ] STATIC_ROOT = os.path.join(BASE_DIR, 'assets') # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
27.038462
91
0.69872
ace8252d6aac558582a4a8df4398d1c8f682b3f9
538
py
Python
tools/ncbi_entrez_eutils/egquery.py
hexylena/tools-iuc
811337eaab815f54f0fd93a3dd23a1153993ea2a
[ "MIT" ]
null
null
null
tools/ncbi_entrez_eutils/egquery.py
hexylena/tools-iuc
811337eaab815f54f0fd93a3dd23a1153993ea2a
[ "MIT" ]
null
null
null
tools/ncbi_entrez_eutils/egquery.py
hexylena/tools-iuc
811337eaab815f54f0fd93a3dd23a1153993ea2a
[ "MIT" ]
null
null
null
#!/usr/bin/env python import argparse import eutils if __name__ == '__main__': parser = argparse.ArgumentParser(description='EGQuery', epilog='') parser.add_argument('term', help='Query') # parser.add_argument('--user_email', help="User email") parser.add_argument('--admin_email', help="Admin email") args = parser.parse_args() c = eutils.Client(user_email=args.user_email, admin_email=args.admin_email) payload = { 'term': args.term, } results = c.gquery(**payload) print results
25.619048
79
0.671004
ace825482ffa1a4ef398961e7714c28933827b98
5,053
py
Python
core/changelog.py
RPS1222/modmailbot
19aad963b8c1f51528f09d65d987ce85a4e75cbe
[ "MIT" ]
null
null
null
core/changelog.py
RPS1222/modmailbot
19aad963b8c1f51528f09d65d987ce85a4e75cbe
[ "MIT" ]
null
null
null
core/changelog.py
RPS1222/modmailbot
19aad963b8c1f51528f09d65d987ce85a4e75cbe
[ "MIT" ]
null
null
null
import re from typing import List from discord import Embed from core.models import getLogger from core.utils import truncate logger = getLogger(__name__) class Version: """ This class represents a single version of Modmail. Parameters ---------- bot : Bot The Modmail bot. version : str The version string (ie. "v2.12.0"). lines : str The lines of changelog messages for this version. Attributes ---------- bot : Bot The Modmail bot. version : str The version string (ie. "v2.12.0"). lines : str A list of lines of changelog messages for this version. fields : Dict[str, str] A dict of fields separated by "Fixed", "Changed", etc sections. description : str General description of the version. Class Attributes ---------------- ACTION_REGEX : str The regex used to parse the actions. DESCRIPTION_REGEX: str The regex used to parse the description. """ ACTION_REGEX = r"###\s*(.+?)\s*\n(.*?)(?=###\s*.+?|$)" DESCRIPTION_REGEX = r"^(.*?)(?=###\s*.+?|$)" def __init__(self, bot, branch: str, version: str, lines: str): self.bot = bot self.version = version.lstrip("vV") self.lines = lines.strip() self.fields = {} self.changelog_url = f"https://github.com/kyb3r/modmail/blob/{branch}/CHANGELOG.md" self.description = "" self.parse() def __repr__(self) -> str: return f'Version(v{self.version}, description="{self.description}")' def parse(self) -> None: """ Parse the lines and split them into `description` and `fields`. 
""" self.description = re.match(self.DESCRIPTION_REGEX, self.lines, re.DOTALL) self.description = ( self.description.group(1).strip() if self.description is not None else "" ) matches = re.finditer(self.ACTION_REGEX, self.lines, re.DOTALL) for m in matches: try: self.fields[m.group(1).strip()] = m.group(2).strip() except AttributeError: logger.error( "Something went wrong when parsing the changelog for version %s.", self.version, exc_info=True, ) @property def url(self) -> str: return f"{self.changelog_url}#v{self.version[::2]}" @property def embed(self) -> Embed: """ Embed: the formatted `Embed` of this `Version`. """ embed = Embed(color=self.bot.main_color, description=self.description) embed.set_author( name=f"v{self.version} - Changelog", icon_url=self.bot.user.avatar_url, url=self.url ) for name, value in self.fields.items(): embed.add_field(name=name, value=truncate(value, 1024)) embed.set_footer(text=f"Current version: v{self.bot.version}") embed.set_thumbnail(url=self.bot.user.avatar_url) return embed class Changelog: """ This class represents the complete changelog of Modmail. Parameters ---------- bot : Bot The Modmail bot. text : str The complete changelog text. Attributes ---------- bot : Bot The Modmail bot. text : str The complete changelog text. versions : List[Version] A list of `Version`'s within the changelog. Class Attributes ---------------- VERSION_REGEX : re.Pattern The regex used to parse the versions. """ VERSION_REGEX = re.compile( r"#\s*([vV]\d+\.\d+(?:\.\d+)?(?:-\w+?)?)\s+(.*?)(?=#\s*[vV]\d+\.\d+(?:\.\d+)(?:-\w+?)?|$)", flags=re.DOTALL, ) def __init__(self, bot, branch: str, text: str): self.bot = bot self.text = text logger.debug("Fetching changelog from GitHub.") self.versions = [Version(bot, branch, *m) for m in self.VERSION_REGEX.findall(text)] @property def latest_version(self) -> Version: """ Version: The latest `Version` of the `Changelog`. 
""" return self.versions[0] @property def embeds(self) -> List[Embed]: """ List[Embed]: A list of `Embed`'s for each of the `Version`. """ return [v.embed for v in self.versions] @classmethod async def from_url(cls, bot, url: str = "") -> "Changelog": """ Create a `Changelog` from a URL. Parameters ---------- bot : Bot The Modmail bot. url : str, optional The URL to the changelog. Returns ------- Changelog The newly created `Changelog` parsed from the `url`. """ branch = "master" if not bot.version.is_prerelease else "development" url = url or f"https://raw.githubusercontent.com/kyb3r/modmail/{branch}/CHANGELOG.md" async with await bot.session.get(url) as resp: return cls(bot, branch, await resp.text())
28.874286
99
0.563428
ace82631c7b5fe20f9b727448ddc56c4f457660d
4,608
py
Python
test/functional/p2p_invalid_block.py
codingbam/visioncoin
7be3fb2b7c0748b2c9b902a88356f21ffacac45c
[ "MIT" ]
null
null
null
test/functional/p2p_invalid_block.py
codingbam/visioncoin
7be3fb2b7c0748b2c9b902a88356f21ffacac45c
[ "MIT" ]
null
null
null
test/functional/p2p_invalid_block.py
codingbam/visioncoin
7be3fb2b7c0748b2c9b902a88356f21ffacac45c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2015-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid blocks. In this test we connect to one node over p2p, and test block requests: 1) Valid blocks should be requested and become chain tip. 2) Invalid block with duplicated transaction should be re-requested. 3) Invalid block with bad coinbase value should be rejected and not re-requested. """ import copy from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script from test_framework.messages import COIN from test_framework.mininode import P2PDataStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class InvalidBlockRequestTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [["-whitelist=127.0.0.1"]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Add p2p connection to node0 node = self.nodes[0] # convenience reference to the node node.add_p2p_connection(P2PDataStore()) best_block = node.getblock(node.getbestblockhash()) tip = int(node.getbestblockhash(), 16) height = best_block["height"] + 1 block_time = best_block["time"] + 1 self.log.info("Create a new block with an anyone-can-spend coinbase") height = 1 block = create_block(tip, create_coinbase(height), block_time) block.nVersion = 0x20000000 block.solve() # Save the coinbase for later block1 = block tip = block.sha256 node.p2p.send_blocks_and_test([block1], node, success=True) self.log.info("Mature the block.") node.generate(100) best_block = node.getblock(node.getbestblockhash()) tip = int(node.getbestblockhash(), 16) height = best_block["height"] + 1 block_time = best_block["time"] + 1 # Use merkle-root malleability to generate an invalid block with # same blockheader. 
# Manufacture a block with 3 transactions (coinbase, spend of prior # coinbase, spend of that spend). Duplicate the 3rd transaction to # leave merkle root and blockheader unchanged but invalidate the block. self.log.info("Test merkle root malleability.") block2 = create_block(tip, create_coinbase(height), block_time) block2.nVersion = 0x20000000 block_time += 1 # b'0x51' is OP_TRUE tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN) tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN) block2.vtx.extend([tx1, tx2]) block2.hashMerkleRoot = block2.calc_merkle_root() block2.rehash() block2.solve() orig_hash = block2.sha256 block2_orig = copy.deepcopy(block2) # Mutate block 2 block2.vtx.append(tx2) assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root()) assert_equal(orig_hash, block2.rehash()) assert block2_orig.vtx != block2.vtx node.p2p.send_blocks_and_test([block2], node, success=False, reject_code=16, reject_reason=b'bad-txns-duplicate') # Check transactions for duplicate inputs self.log.info("Test duplicate input block.") block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0]) block2_orig.vtx[2].rehash() block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root() block2_orig.rehash() block2_orig.solve() node.p2p.send_blocks_and_test([block2_orig], node, success=False, reject_reason=b'bad-txns-inputs-duplicate') self.log.info("Test very broken block.") block3 = create_block(tip, create_coinbase(height), block_time) block3.nVersion = 0x20000000 block_time += 1 block3.vtx[0].vout[0].nValue = 100 * COIN # Too high! block3.vtx[0].sha256 = None block3.vtx[0].calc_sha256() block3.hashMerkleRoot = block3.calc_merkle_root() block3.rehash() block3.solve() node.p2p.send_blocks_and_test([block3], node, success=False, reject_code=16, reject_reason=b'bad-cb-amount') if __name__ == '__main__': InvalidBlockRequestTest().main()
40.069565
122
0.667535
ace8266072741b240691a7e639fb8a5bb4369c24
3,905
py
Python
util/divider/util/extract.py
JGU-VC/activation-pattern-analysis
14da42ad541ee4faf35d360a6e871fd44decd33d
[ "MIT" ]
null
null
null
util/divider/util/extract.py
JGU-VC/activation-pattern-analysis
14da42ad541ee4faf35d360a6e871fd44decd33d
[ "MIT" ]
null
null
null
util/divider/util/extract.py
JGU-VC/activation-pattern-analysis
14da42ad541ee4faf35d360a6e871fd44decd33d
[ "MIT" ]
null
null
null
import os import re import sys import json from tqdm import tqdm import pickle import numpy as np from pathlib import Path from os import path from subprocess import Popen, PIPE # helpers word_re = "([A-Za-z0-9\-,.:]+)" value_re = "[A-Za-z0-9\-]+="+word_re def compile_filename(re_str): return re.compile((re_str+".json").format(word=word_re,value=value_re)) def get_expname(fileglob): expname = fileglob.replace("*","").split("/")[-1] if expname.endswith("_"): expname = expname[:-1] return expname def get_data(files, name_re, name_match_fn, exclude_unfinished=True, cache=True): meta = {"files": []} data = {} plots = [] # filter .bin files files = list(filter(lambda f: not f.endswith(".bin"), files)) common_prefix = os.path.commonprefix(files) dest = path.dirname(common_prefix) bin_file = "%s.bin" % common_prefix if cache and path.exists(bin_file): with open(bin_file, 'rb') as f: meta, data = pickle.load(f) # query asks for subset if len(meta["files"]) > 0 and set(files) < set(meta["files"]): print("queried subset") diff = set(data.keys()) - set([f.split(os.path.sep)[-1] for f in files]) for file in diff: if file in meta["files"]: meta["files"].remove(file) if file in data: del data[file] # if not same list, recreate database elif meta["files"] != files: meta = {} meta["files"] = files for file in tqdm(files, desc="Reading Data from Files", dynamic_ncols=True): jq = lambda cmd: Popen("jq '%s' %s " % (cmd,file), shell=True, stdout=PIPE, stderr=PIPE).communicate()[0].decode('utf-8') jq_json = lambda cmd: json.loads(jq(cmd)) jq_array = lambda cmd: np.array(jq_json(cmd)) try: keys = jq_json('.jsons | keys') keys.remove('diffs') mode_data = re.compile(".*scalar2d-\[(\w+\|\w+)\].*").match(",".join(keys))[1] except: print("skipping file %s. 
(seems incomplete)" % file) continue if not plots: plots = keys # show possible plots if no argument given # if plot.startswith("key"): # print(jq(".jsons | keys")) # sys.exit(0) # get data try: train_acc = float(jq('.jsons["scalar-accuracy"].content.data[-1].y[-1]')) test_acc = float(jq('.jsons["scalar-test_acc_1"].content.data[-1].y[-1]')) except: train_acc = 0 test_acc = 0 if exclude_unfinished: continue train_H = jq_array('.jsons["scalar2d-['+mode_data+'] % max Entropy"].content.data[0].z | transpose | .[-1]') test_H = jq_array('.jsons["bar-(Test) % max Entropy"].content.data[0].y') # test_H = 0 d = { "test_acc": test_acc, "train_acc": train_acc, "train_H": train_H, "test_H": test_H, "mode_data": mode_data } for p in plots: if p == "settings": continue d[p] = jq_json(".jsons[\"%s\"].content.data[0]" % p) data[path.basename(str(file))] = d if os.path.isfile(bin_file): os.remove(bin_file) if cache: with open(bin_file, 'wb') as f: pickle.dump((meta,data),f,protocol=pickle.HIGHEST_PROTOCOL) # add measures for name, d in data.items(): name = name.replace("basicblock_","basicblock") m = name_re.match(name) name_match_fn(d,m) for d in data.values(): if "mode_data" not in d: d["mode_data"] = "tm|trd" return data
30.992063
133
0.528809
ace8275f1cbff7c86e9cf599ee59fcaafe125b95
1,603
py
Python
setup.py
themotleyfool/django-pymssql
610a97d0930a44a92358ee4acdc5d2d2f9619a77
[ "MIT" ]
null
null
null
setup.py
themotleyfool/django-pymssql
610a97d0930a44a92358ee4acdc5d2d2f9619a77
[ "MIT" ]
null
null
null
setup.py
themotleyfool/django-pymssql
610a97d0930a44a92358ee4acdc5d2d2f9619a77
[ "MIT" ]
null
null
null
import os import setuptools # Avoid polluting the .tar.gz with ._* files under Mac OS X os.putenv('COPYFILE_DISABLE', 'true') # Prevent distutils from complaining that a standard file wasn't found README = os.path.join(os.path.dirname(__file__), 'README') if not os.path.exists(README): os.symlink(README + '.rst', README) description = ('Django database backend for Microsoft SQL Server ' 'that works on non-Windows systems.') with open(README) as f: long_description = '\n\n'.join(f.read().split('\n\n')[1:]) setuptools.setup( name='django-pymssql', version='1.7.1', author='Aymeric Augustin', author_email='aymeric.augustin@m4x.org', url='https://github.com/aaugustin/django-pymssql', description=description, long_description=long_description, download_url='http://pypi.python.org/pypi/django-pymssql', packages=[ 'sqlserver_pymssql', ], install_requires=[ 'Django', 'django-mssql', 'pymssql', ], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Framework :: Django', 'Framework :: Django :: 1.7', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', ], platforms='all', license='BSD', )
30.245283
70
0.624454
ace82a080ee813b391c0946efc92c91327e15da0
262
py
Python
clients-test-server/subapp/tests.py
kaleido-public/django-client-framework
cd755261e001a0d446a85407550648563511f61b
[ "MIT" ]
null
null
null
clients-test-server/subapp/tests.py
kaleido-public/django-client-framework
cd755261e001a0d446a85407550648563511f61b
[ "MIT" ]
3
2021-06-28T20:36:39.000Z
2021-11-11T02:12:35.000Z
clients-test-server/subapp/tests.py
kaleido-public/django-client-framework
cd755261e001a0d446a85407550648563511f61b
[ "MIT" ]
null
null
null
from django.test import TestCase from .models import Product class BasicTest(TestCase): def test_clear(self): Product.objects.create() resp = self.client.post("/subapp/clear") self.assertContains(resp, "Successfully deleted all.")
23.818182
62
0.698473
ace82a78855392a88df562975f6b26817d198a6c
10,337
py
Python
lib/net/POINT_model.py
lekooooook/POINT2-pytorch
c9f5fad59e2f7da2c169255de5a730d861a1a96e
[ "MIT" ]
2
2021-01-06T08:40:35.000Z
2021-03-01T15:34:43.000Z
lib/net/POINT_model.py
lekooooook/POINT2-pytorch
c9f5fad59e2f7da2c169255de5a730d861a1a96e
[ "MIT" ]
1
2021-07-23T09:14:29.000Z
2021-07-23T09:14:29.000Z
lib/net/POINT_model.py
lekooooook/POINT2-pytorch
c9f5fad59e2f7da2c169255de5a730d861a1a96e
[ "MIT" ]
null
null
null
import torch import torch.nn as nn import torch.nn.functional as F from torchviz import make_dot from kornia import SpatialSoftArgmax2d from .unet_model import UNet from .FE_layer import FE_layer import matplotlib.pyplot as plt import math from graphviz import Digraph from torch.autograd import Variable, Function def gaussian2d(mu, sigma, shape): (r, c), (sr, sc), (H, W) = mu, sigma, shape pi = torch.tensor(math.pi) rr = torch.arange(r - 3 * sr, r + 3 * sr + 1).float() cc = torch.arange(c - 3 * sc, c + 3 * sc + 1).float() rr = rr[(rr >= 0) & (rr < H)] cc = cc[(cc >= 0) & (cc < W)] gr = torch.exp(-0.5 * ((rr - r) / sr)**2) / (torch.sqrt(2 * pi) * sr) gc = torch.exp(-0.5 * ((cc - c) / sc)**2) / (torch.sqrt(2 * pi) * sc) g = torch.ger(gr, gc).view(-1) rr, cc = torch.meshgrid(rr.long(), cc.long()) rr = rr.contiguous().view(-1) cc = cc.contiguous().view(-1) return rr, cc, g class PNet(nn.Module): def __init__(self, device, n_channels=1, n_classes=64, bilinear=True, patch_neighbor_size=10): super(PNet, self).__init__() self.n_channels = n_channels self.n_classes = n_classes self.bilinear = bilinear self.patch_neighbor_size = patch_neighbor_size self.UNet = UNet(self.n_channels, self.n_classes, bilinear=True) self.FE_layer = FE_layer(patch_neighbor_size) self.padding = nn.ReplicationPad2d(patch_neighbor_size) # self.loss = nn.BCELoss() self.batchnorm = nn.BatchNorm2d(1) self.kernel_weight = nn.Parameter(torch.ones((1, self.n_classes, 2 * patch_neighbor_size + 1, 2 * patch_neighbor_size + 1))) self.softArgmax = SpatialSoftArgmax2d(temperature=10000, normalized_coordinates=False) self.optimizer = torch.optim.Adam([ {'params': self.UNet.parameters()}, {'params': self.kernel_weight} ], lr=0.001, weight_decay=1e-8) self.device = device def set_input(self, input1, input2, correspondence_2D): self.input_drr1 = input1 self.input_drr2 = input2 self.correspondence_2D = correspondence_2D self.batch_size = self.correspondence_2D.shape[0] self.point_num = 
self.correspondence_2D.shape[2] print(self.correspondence_2D.shape) def generate_score_map_gt(self, single_POI, size_H, size_W): # score_map_gt = torch.zeros((size_H, size_W)).cuda() # # score_map_gt = score_map_gt * (-10) # up_bound = single_POI[2] + self.patch_neighbor_size # low_bound = single_POI[2] - self.patch_neighbor_size # right_bound = single_POI[3] + self.patch_neighbor_size # left_bound = single_POI[3] - self.patch_neighbor_size # if up_bound >= size_H: # up_bound = size_H - 1 # if low_bound < 0: # low_bound = 0 # if right_bound >= size_W: # right_bound = size_W - 1 # if left_bound < 0: # left_bound = 0 # score_map_gt[low_bound : up_bound + 1, left_bound : right_bound + 1] = 1 # score_map_gt = score_map_gt.unsqueeze(0).unsqueeze(0) score_map_gt = torch.zeros((size_H, size_W)) rr, cc, g = gaussian2d([single_POI[2], single_POI[3]], [self.patch_neighbor_size, self.patch_neighbor_size], shape=score_map_gt.shape) score_map_gt[rr, cc] = torch.max(score_map_gt[rr, cc], g / g.max()) score_map_gt = score_map_gt.unsqueeze(0).unsqueeze(0) score_map_gt = score_map_gt.clone() score_map_gt = score_map_gt.to(device=self.device, dtype=torch.float32) # print("max, min:", torch.max(score_map_gt), torch.min(score_map_gt)) return score_map_gt def forward(self): self.feature_map1 = self.UNet(self.input_drr1) self.feature_map2 = self.UNet(self.input_drr2) self.i_size_H = self.input_drr1.shape[2] self.i_size_W = self.input_drr1.shape[3] self.f_size_H = self.feature_map1.shape[2] self.f_size_W = self.feature_map1.shape[3] self.factor_H = self.i_size_H / self.f_size_H self.factor_W = self.i_size_W / self.f_size_W score_map_total_list = [] score_map_gt_total_list = [] for batch_index in range(self.batch_size): score_map_per_batch_list = [] score_map_gt_per_batch_list = [] for point_index in range(self.point_num): drr_POI = self.correspondence_2D[batch_index][0][point_index][0 : 4].clone() drr_POI_reverse = torch.flip(drr_POI, dims=[0]) drr_POI[0] = torch.floor(drr_POI_reverse[2] 
/ self.factor_H) # No need to multiply factor drr_POI[1] = torch.floor(drr_POI_reverse[3] / self.factor_W) # No need to multiply factor drr_POI[2] = torch.floor(drr_POI_reverse[0] / self.factor_H) # No need to multiply factor drr_POI[3] = torch.floor(drr_POI_reverse[1] / self.factor_W) # No need to multiply factor drr_POI = drr_POI.int() # extract feature feature_map1_divided = self.feature_map1[batch_index].unsqueeze(0) feature_kernel = torch.mul(self.FE_layer(feature_map1_divided, drr_POI), self.kernel_weight) # get score map feature_map2_divided = self.feature_map2[batch_index].unsqueeze(0) score_map = F.conv2d(feature_map2_divided, feature_kernel) upsample = nn.Upsample(scale_factor=(self.f_size_H / score_map.shape[2], self.f_size_W / score_map.shape[3]), mode='bilinear', align_corners=True) score_map = upsample(score_map) score_map = self.batchnorm(score_map) score_map_per_batch_list.append(score_map) # get ground truth score map score_map_gt = self.generate_score_map_gt(drr_POI, self.f_size_H, self.f_size_W) score_map_gt_per_batch_list.append(score_map_gt) # -------------- Show ---------------- # show_flag = 0 if show_flag == 1: # Soft Argmax max_index = self.softArgmax(score_map) max_index_flattened = max_index.view(-1) max_index_flattened = torch.flip(max_index_flattened, dims=[0]) score_map_squeezed = torch.squeeze(score_map) score_map_flattened = score_map_squeezed.view(-1) print("original eular distance:", (drr_POI[0] - drr_POI[2]) ** 2 + (drr_POI[1] - drr_POI[3]) ** 2) score_map_squeezed_show = score_map_squeezed.cpu().data.numpy() feature_map1_divided_show = torch.mean(feature_map1_divided, dim=1) feature_map1_divided_show = torch.squeeze(feature_map1_divided_show) feature_map1_divided_show = feature_map1_divided_show.cpu().data.numpy() feature_map2_divided_show = torch.mean(feature_map2_divided, dim=1) feature_map2_divided_show = torch.squeeze(feature_map2_divided_show) feature_map2_divided_show = feature_map2_divided_show.cpu().data.numpy() 
max_index_show = max_index_flattened.cpu().data.numpy() drr_POI_show = drr_POI.cpu().data.numpy() input1 = torch.squeeze(self.input_drr1[batch_index]) input1_show = input1.cpu().data.numpy() input2 = torch.squeeze(self.input_drr2[batch_index]) input2_show = input2.cpu().data.numpy() fig = plt.figure() plt.subplot(221) plt.imshow(input1_show, cmap='gray') plt.scatter([drr_POI_show[1]], [drr_POI_show[0]], marker='+') plt.title('DRR') plt.subplot(223) plt.imshow(input2_show, cmap='gray') # plt.scatter([drr_POI_show[3]], [drr_POI_show[2]], marker='x') plt.scatter([max_index_show[1]], [max_index_show[0]], marker='o', cmap='green') plt.title('x-ray') plt.subplot(222) plt.imshow(feature_map2_divided_show, cmap='gray') # plt.scatter([drr_POI_show[3]], [drr_POI_show[2]], marker='x') plt.title('feature map') plt.subplot(224) plt.imshow(score_map_squeezed_show, cmap='gray') plt.scatter([drr_POI_show[1]], [drr_POI_show[0]], marker='+', cmap='orange') # plt.scatter([drr_POI_show[3]], [drr_POI_show[2]], marker='x', cmap='orange') plt.scatter([max_index_show[1]], [max_index_show[0]], marker='o', cmap='green') plt.title('score map') fig.tight_layout() plt.show() score_map_per_batch = torch.cat(score_map_per_batch_list, dim=1) score_map_total_list.append(score_map_per_batch) score_map_gt_per_batch = torch.cat(score_map_gt_per_batch_list, dim=1) score_map_gt_total_list.append(score_map_gt_per_batch) self.score_map_total = torch.cat(score_map_total_list, dim=0) self.score_map_gt_total = torch.cat(score_map_gt_total_list, dim=0) # # Soft Argmax # self.soft_max_index = self.softArgmax(self.score_map_total) # print(self.soft_max_index.shape) return self.score_map_total, self.score_map_gt_total def backward_basic(self): # Sum self.loss_total = F.binary_cross_entropy_with_logits(self.score_map_total, self.score_map_gt_total, reduction='mean') # g = make_dot(self.loss_total) # g.view() print("loss:", self.loss_total) self.loss_total.backward() # 
print(self.UNet.up1.conv.double_conv[0].weight.grad) def optimize_parameters(self): # forward self() self.optimizer.zero_grad() self.backward_basic() nn.utils.clip_grad_value_(self.UNet.parameters(), 0.1) nn.utils.clip_grad_value_(self.kernel_weight, 0.1) self.optimizer.step()
48.303738
162
0.604624
ace82b8867b07e881105284fd79abac322527ff8
11,952
py
Python
tensorflow_datasets/image/abstract_reasoning.py
shashwat9kumar/datasets
99b055408025f8e934fcbb0fc054488aa087ebfb
[ "Apache-2.0" ]
1
2019-07-19T15:01:45.000Z
2019-07-19T15:01:45.000Z
tensorflow_datasets/image/abstract_reasoning.py
shashwat9kumar/datasets
99b055408025f8e934fcbb0fc054488aa087ebfb
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/image/abstract_reasoning.py
shashwat9kumar/datasets
99b055408025f8e934fcbb0fc054488aa087ebfb
[ "Apache-2.0" ]
1
2021-08-02T22:12:40.000Z
2021-08-02T22:12:40.000Z
# coding=utf-8 # Copyright 2021 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """AbstractReasoning data set.""" import os import random import numpy as np import six import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds _CITATION = """\ @InProceedings{pmlr-v80-barrett18a, title = {Measuring abstract reasoning in neural networks}, author = {Barrett, David and Hill, Felix and Santoro, Adam and Morcos, Ari and Lillicrap, Timothy}, booktitle = {Proceedings of the 35th International Conference on Machine Learning}, pages = {511--520}, year = {2018}, editor = {Dy, Jennifer and Krause, Andreas}, volume = {80}, series = {Proceedings of Machine Learning Research}, address = {Stockholmsmassan, Stockholm Sweden}, month = {10--15 Jul}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v80/barrett18a/barrett18a.pdf}, url = {http://proceedings.mlr.press/v80/barrett18a.html}, abstract = {Whether neural networks can learn abstract reasoning or whether\ they merely rely on superficial statistics is a topic of recent debate. Here, \ we propose a dataset and challenge designed to probe abstract reasoning, \ inspired by a well-known human IQ test. To succeed at this challenge, models \ must cope with various generalisation 'regimes' in which the training data and \ test questions differ in clearly-defined ways. 
We show that popular models \ such as ResNets perform poorly, even when the training and test sets differ \ only minimally, and we present a novel architecture, with structure designed \ to encourage reasoning, that does significantly better. When we vary the way \ in which the test questions and training data differ, we find that our model \ is notably proficient at certain forms of generalisation, but notably weak at \ others. We further show that the model's ability to generalise improves \ markedly if it is trained to predict symbolic explanations for its answers. \ Altogether, we introduce and explore ways to both measure and induce stronger \ abstract reasoning in neural networks. Our freely-available dataset should \ motivate further progress in this direction.} } """ _URL = ("https://github.com/deepmind/abstract-reasoning-matrices") _DESCRIPTION = """\ Procedurally Generated Matrices (PGM) data from the paper Measuring Abstract \ Reasoning in Neural Networks, Barrett, Hill, Santoro et al. 2018. The goal is \ to infer the correct answer from the context panels based on abstract \ reasoning. To use this data set, please download all the *.tar.gz files from the data set \ page and place them in ~/tensorflow_datasets/abstract_reasoning/. $R$ denotes the set of relation types (progression, XOR, OR, AND, \ consistent union), $O$ denotes the object types (shape, line), and $A$ denotes \ the attribute types (size, colour, position, number). The structure of a \ matrix, $S$, is the set of triples $S={[r, o, a]}$ that determine the \ challenge posed by a particular matrix. """ _DESCRIPTION_NEUTRAL = r"""The structures encoding the matrices in both the \ training and testing sets contain any triples $[r, o, a]$ for $r \\in R$, \ $o \\in O$, and $a \\in A$. Training and testing sets are disjoint, with \ separation occurring at the level of the input variables (i.e. 
pixel \ manifestations).""" _DESCRIPTION_INTERPOLATION = r"""As in the neutral split, $S$ consisted of any \ triples $[r, o, a]$. For interpolation, in the training set, when the \ attribute was "colour" or "size" (i.e., the ordered attributes), the values of \ the attributes were restricted to even-indexed members of a discrete set, \ whereas in the test set only odd-indexed values were permitted. Note that all \ $S$ contained some triple $[r, o, a]$ with the colour or size attribute . \ Thus, generalisation is required for every question in the test set.""" _DESCRIPTION_EXTRAPOLATION = r"""Same as in interpolation, but the values of \ the attributes were restricted to the lower half of the discrete set during \ training, whereas in the test set they took values in the upper half.""" _DESCRIPTION_ATTR_REL_PAIRS = r"""All $S$ contained at least two triples, \ $([r_1,o_1,a_1],[r_2,o_2,a_2]) = (t_1, t_2)$, of which 400 are viable. We \ randomly allocated 360 to the training set and 40 to the test set. Members \ $(t_1, t_2)$ of the 40 held-out pairs did not occur together in structures $S$ \ in the training set, and all structures $S$ had at least one such pair \ $(t_1, t_2)$ as a subset.""" _DESCRIPTION_ATTR_RELS = r"""In our dataset, there are 29 possible unique \ triples $[r,o,a]$. We allocated seven of these for the test set, at random, \ but such that each of the attributes was represented exactly once in this set. \ These held-out triples never occurred in questions in the training set, and \ every $S$ in the test set contained at least one of them.""" _DESCRIPTION_ATTR_PAIRS = r"""$S$ contained at least two triples. There are 20 \ (unordered) viable pairs of attributes $(a_1, a_2)$ such that for some \ $r_i, o_i, ([r_1,o_1,a_1],[r_2,o_2,a_2])$ is a viable triple pair \ $([r_1,o_1,a_1],[r_2,o_2,a_2]) = (t_1, t_2)$. We allocated 16 of these pairs \ for training and four for testing. 
For a pair $(a_1, a_2)$ in the test set, \
$S$ in the training set contained triples with $a_1$ or $a_2$. In the test \
set, all $S$ contained triples with $a_1$ and $a_2$."""

_DESCRIPTION_ATTR_SHAPE_COLOR = r"""Held-out attribute shape-colour. $S$ in \
the training set contained no triples with $o$=shape and $a$=colour. \
All structures governing puzzles in the test set contained at least one triple \
with $o$=shape and $a$=colour."""

_DESCRIPTION_ATTR_LINE_TYPE = r"""Held-out attribute line-type. $S$ in \
the training set contained no triples with $o$=line and $a$=type. \
All structures governing puzzles in the test set contained at least one triple \
with $o$=line and $a$=type."""


class AbstractReasoningConfig(tfds.core.BuilderConfig):
  """BuilderConfig for AbstractReasoning.

  Each config corresponds to one generalisation regime of the dataset;
  ``split_type`` selects which ``<split_type>.tar.gz`` archive is read in
  ``_build_pcollection``.
  """

  def __init__(self, *, split_type="neutral", **kwargs):
    """BuilderConfig for AbstractReasoning.

    Args:
      split_type: String with split_type to use. Should be one of ["neutral",
        "interpolation", "extrapolation", "attr.rel.pairs", "attr.rels",
        "attrs.pairs", "attrs.shape.color", "attrs.line.type",].
      **kwargs: keyword arguments forwarded to super.
    """
    super(AbstractReasoningConfig, self).__init__(
        version=tfds.core.Version("1.0.0"),
        **kwargs,
    )
    self.split_type = split_type


class AbstractReasoning(tfds.core.BeamBasedBuilder):
  """Abstract reasoning dataset."""

  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  Data can be downloaded from
  https://console.cloud.google.com/storage/browser/ravens-matrices
  Please put all the tar.gz files in manual_dir.
  """

  # One BuilderConfig per generalisation regime described by the
  # _DESCRIPTION_* constants above.
  BUILDER_CONFIGS = [
      AbstractReasoningConfig(
          name="neutral",
          description=_DESCRIPTION_NEUTRAL,
      ),
      AbstractReasoningConfig(
          name="interpolation",
          description=_DESCRIPTION_INTERPOLATION,
          split_type="interpolation",
      ),
      AbstractReasoningConfig(
          name="extrapolation",
          description=_DESCRIPTION_EXTRAPOLATION,
          split_type="extrapolation",
      ),
      AbstractReasoningConfig(
          name="attr.rel.pairs",
          description=_DESCRIPTION_ATTR_REL_PAIRS,
          split_type="attr.rel.pairs",
      ),
      AbstractReasoningConfig(
          name="attr.rels",
          description=_DESCRIPTION_ATTR_RELS,
          split_type="attr.rels",
      ),
      AbstractReasoningConfig(
          name="attrs.pairs",
          description=_DESCRIPTION_ATTR_PAIRS,
          split_type="attrs.pairs",
      ),
      AbstractReasoningConfig(
          name="attrs.shape.color",
          description=_DESCRIPTION_ATTR_SHAPE_COLOR,
          split_type="attrs.shape.color",
      ),
      AbstractReasoningConfig(
          name="attrs.line.type",
          description=_DESCRIPTION_ATTR_LINE_TYPE,
          split_type="attrs.line.type",
      ),
  ]

  def _info(self):
    # Each example is one puzzle: 8 context panels plus 8 candidate answer
    # panels (160x160 grayscale frames, see _process_example), the index of
    # the correct answer, and integer encodings of the governing structure.
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            "context": tfds.features.Video(shape=(8, 160, 160, 1)),
            "answers": tfds.features.Video(shape=(8, 160, 160, 1)),
            "target": tfds.features.ClassLabel(num_classes=8),
            "meta_target": tfds.features.Tensor(shape=[12], dtype=tf.int64),
            "relation_structure_encoded": tfds.features.Tensor(shape=[4, 12],
                                                               dtype=tf.int64),
            "filename": tfds.features.Text(),
        }),
        homepage=_URL,
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    # All three splits read from the same manually downloaded archive; the
    # split name is matched against member file names in _build_pcollection.
    path = dl_manager.manual_dir
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs={
                "folder": path,
                "split": "train",
            }),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs={
                "folder": path,
                "split": "val",
            }),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            gen_kwargs={
                "folder": path,
                "split": "test",
            }),
    ]

  def _build_pcollection(self, pipeline, folder, split):
    """Generate examples as dicts."""
    beam = tfds.core.lazy_imports.apache_beam
    split_type = self.builder_config.split_type
    filename = os.path.join(folder, "{}.tar.gz".format(split_type))

    def _extract_data(inputs):
      """Extracts files from the tar archives."""
      filename, split = inputs
      for name, fobj in tfds.download.iter_archive(
          filename, tfds.download.ExtractMethod.TAR_STREAM):
        # Member names are "_"-separated; the third token identifies the
        # split ("train"/"val"/"test") — keep only the requested split.
        split_name = name.split("_")
        if len(split_name) > 2 and split_name[2] == split:
          yield [name, fobj.read()]

    def _process_example(inputs):
      """Decodes one .npz payload into a (filename, features-dict) pair."""
      filename, data_string = inputs
      # np.load needs a seekable file object, so wrap the raw bytes.
      buf = six.BytesIO(data_string)
      buf.seek(0)
      data = np.load(buf)

      # Extract the images and convert to uint8. The reshape is required, see
      # https://github.com/deepmind/abstract-reasoning-matrices.
      all_images = np.uint8(data["image"].reshape(16, 160, 160, 1))

      return filename, {
          "relation_structure_encoded": data["relation_structure_encoded"],
          "target": data["target"],
          "meta_target": data["meta_target"],
          # First 8 frames are the context grid, last 8 the answer choices.
          "context": all_images[:8],
          "answers": all_images[8:],
          "filename": filename,
      }

    # Beam might fuse together the _extract_data and _process_example which
    # defeats the purpose of parallel processing. As a result, we reshard by
    # doing a GroupByKey on random keys, and then flattening again.
    def _add_random_keys(inputs):
      key = str(random.randrange(10**10))
      return key, inputs

    def _remove_keys(inputs):
      _, rows = inputs
      for row in rows:
        yield row

    return (pipeline
            | beam.Create([(filename, split)])
            | beam.FlatMap(_extract_data)
            | beam.Map(_add_random_keys)
            | beam.GroupByKey()
            | beam.FlatMap(_remove_keys)
            | beam.Map(_process_example))
40.931507
103
0.67746
ace82c44b84f04977e3d298347a1c6268c1b9bcb
1,138
py
Python
py3/IMGURdl/downloadIMGUR.py
samcheck/Scripts
283c62242ffe0dc9812a800e16419b998cf00af5
[ "MIT" ]
1
2017-03-28T00:28:55.000Z
2017-03-28T00:28:55.000Z
py3/IMGURdl/downloadIMGUR.py
samcheck/Scripts
283c62242ffe0dc9812a800e16419b998cf00af5
[ "MIT" ]
null
null
null
py3/IMGURdl/downloadIMGUR.py
samcheck/Scripts
283c62242ffe0dc9812a800e16419b998cf00af5
[ "MIT" ]
null
null
null
# example from:
# https://www.toptal.com/python/beginners-guide-to-concurrency-and-parallelism-in-python
import json
import logging
import os
from pathlib import Path
from urllib.request import urlopen, Request

import requests

logger = logging.getLogger(__name__)


def get_links(client_id):
    """Fetch image links from a random imgur gallery.

    Args:
        client_id: imgur API client ID, sent in the Authorization header.

    Returns:
        An iterator of image URL strings (the ``link`` field of each
        gallery item).

    Raises:
        requests.HTTPError: if the gallery request returns an error status.
    """
    headers = {'Authorization': 'Client-ID {}'.format(client_id)}
    url = 'https://api.imgur.com/3/gallery/random/random/'
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    data = resp.json()
    return map(lambda item: item['link'], data['data'])


def download_link(directory, link):
    """Download a single image into *directory*.

    The local file is named after the final path component of *link*.

    Args:
        directory: ``pathlib.Path`` of the destination directory.
        link: URL of the image to download.
    """
    logger.info('Downloading %s', link)
    download_path = directory / os.path.basename(link)
    with urlopen(link) as image, download_path.open('wb') as f:
        f.write(image.read())


def setup_download_dir():
    """Create the local ``images`` directory if needed and return its Path.

    Uses ``mkdir(exist_ok=True)`` instead of an ``exists()`` check followed
    by ``mkdir()``, which is racy if two processes start at the same time.
    """
    download_dir = Path('images')
    download_dir.mkdir(exist_ok=True)
    return download_dir
30.756757
100
0.696837
ace82cbf420fe26ee812be6374dd4bc6ce4a6a1b
1,349
py
Python
tests/storage/test_txn_limit.py
mlakkadshaw/synapse
74a2365bd5066955567cc551e72632d6cece94b9
[ "Apache-2.0" ]
9,945
2015-01-02T07:41:06.000Z
2022-03-31T23:22:42.000Z
tests/storage/test_txn_limit.py
t2bot/synapse
62ca554ef09330cb88d46fca8296a859d0adc143
[ "Apache-2.0" ]
9,320
2015-01-08T14:09:03.000Z
2022-03-31T21:11:24.000Z
tests/storage/test_txn_limit.py
t2bot/synapse
62ca554ef09330cb88d46fca8296a859d0adc143
[ "Apache-2.0" ]
2,299
2015-01-31T22:16:29.000Z
2022-03-31T06:08:26.000Z
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tests import unittest


class SQLTransactionLimitTestCase(unittest.HomeserverTestCase):
    """Verify that the per-connection SQL transaction limit does not break
    ordinary transactions."""

    def make_homeserver(self, reactor, clock):
        # Homeserver whose DB connections are limited to 1000 transactions.
        return self.setup_test_homeserver(db_txn_limit=1000)

    def test_config(self):
        # The configured limit must be visible in the database config.
        database_config = self.hs.config.database.get_single_database()
        self.assertEqual(database_config.config["txn_limit"], 1000)

    def test_select(self):
        def run_select(txn):
            txn.execute("SELECT 1")

        database = self.hs.get_datastores().databases[0]

        # Run one more transaction than the limit so the counter must roll
        # over at least once; every interaction should still succeed.
        iterations = 1001
        for _ in range(iterations):
            self.get_success_or_raise(
                database.runInteraction("test_select", run_select)
            )
36.459459
87
0.72424