content
stringlengths
27
928k
path
stringlengths
4
230
size
int64
27
928k
nl_text
stringlengths
21
396k
nl_size
int64
21
396k
nl_language
stringlengths
2
3
nl_language_score
float64
0.04
1
# Copyright (C) 2020-2021 Intel Corporation # # SPDX-License-Identifier: MIT import csv import os import os.path as osp from datumaro.components.annotation import ( AnnotationType, Bbox, Label, LabelCategories, Points, ) from datumaro.components.converter import Converter from datumaro.components.extractor import DatasetItem, Extractor, Importer from datumaro.util.image import find_images class VggFace2Path: ANNOTATION_DIR = "bb_landmark" IMAGE_EXT = '.jpg' BBOXES_FILE = 'loose_bb_' LANDMARKS_FILE = 'loose_landmark_' LABELS_FILE = 'labels.txt' IMAGES_DIR_NO_LABEL = 'no_label' class VggFace2Extractor(Extractor): def __init__(self, path): subset = None if osp.isdir(path): self._path = path elif osp.isfile(path): subset = osp.splitext(osp.basename(path).split('_')[2])[0] self._path = osp.dirname(path) else: raise Exception("Can't read annotations from '%s'" % path) annotation_files = [p for p in os.listdir(self._path) if (osp.basename(p).startswith(VggFace2Path.BBOXES_FILE) or \ osp.basename(p).startswith(VggFace2Path.LANDMARKS_FILE)) and \ p.endswith('.csv')] if len(annotation_files) < 1: raise Exception("Can't find annotations in the directory '%s'" % path) super().__init__() self._dataset_dir = osp.dirname(self._path) self._subsets = {subset} if subset else set( osp.splitext(f.split('_')[2])[0] for f in annotation_files ) self._categories = {} self._items = [] self._load_categories() for subset in self._subsets: self._items.extend(list(self._load_items(subset).values())) def __iter__(self): return iter(self._items) def categories(self): return self._categories def _load_categories(self): label_cat = LabelCategories() path = osp.join(self._dataset_dir, VggFace2Path.LABELS_FILE) if osp.isfile(path): with open(path, encoding='utf-8') as labels_file: lines = [s.strip() for s in labels_file] for line in lines: objects = line.split() label = objects[0] class_name = None if 1 < len(objects): class_name = objects[1] label_cat.add(label, parent=class_name) else: for subset 
in self._subsets: subset_path = osp.join(self._dataset_dir, subset) if osp.isdir(subset_path): for images_dir in sorted(os.listdir(subset_path)): if osp.isdir(osp.join(subset_path, images_dir)) and \ images_dir != VggFace2Path.IMAGES_DIR_NO_LABEL: label_cat.add(images_dir) self._categories[AnnotationType.label] = label_cat def _load_items(self, subset): def _get_label(path): label_name = path.split('/')[0] label = None if label_name != VggFace2Path.IMAGES_DIR_NO_LABEL: label = \ self._categories[AnnotationType.label].find(label_name)[0] return label items = {} image_dir = osp.join(self._dataset_dir, subset) if osp.isdir(image_dir): images = { osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'): p for p in find_images(image_dir, recursive=True) } else: images = {} landmarks_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR, VggFace2Path.LANDMARKS_FILE + subset + '.csv') if osp.isfile(landmarks_path): with open(landmarks_path, encoding='utf-8') as content: landmarks_table = list(csv.DictReader(content)) for row in landmarks_table: item_id = row['NAME_ID'] label = None if '/' in item_id: label = _get_label(item_id) if item_id not in items: items[item_id] = DatasetItem(id=item_id, subset=subset, image=images.get(row['NAME_ID'])) annotations = items[item_id].annotations if [a for a in annotations if a.type == AnnotationType.points]: raise Exception("Item %s: an image can have only one " "set of landmarks" % item_id) if len([p for p in row if row[p] == '']) == 0 and len(row) == 11: annotations.append(Points( [float(row[p]) for p in row if p != 'NAME_ID'], label=label) ) elif label is not None: annotations.append(Label(label=label)) bboxes_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR, VggFace2Path.BBOXES_FILE + subset + '.csv') if osp.isfile(bboxes_path): with open(bboxes_path, encoding='utf-8') as content: bboxes_table = list(csv.DictReader(content)) for row in bboxes_table: item_id = row['NAME_ID'] label = None if '/' in item_id: 
label = _get_label(item_id) if item_id not in items: items[item_id] = DatasetItem(id=item_id, subset=subset, image=images.get(row['NAME_ID'])) annotations = items[item_id].annotations if [a for a in annotations if a.type == AnnotationType.bbox]: raise Exception("Item %s: an image can have only one " "bbox" % item_id) if len([p for p in row if row[p] == '']) == 0 and len(row) == 5: annotations.append(Bbox(float(row['X']), float(row['Y']), float(row['W']), float(row['H']), label=label)) return items class VggFace2Importer(Importer): @classmethod def find_sources(cls, path): if osp.isdir(path): annotation_dir = osp.join(path, VggFace2Path.ANNOTATION_DIR) if osp.isdir(annotation_dir): return [{ 'url': annotation_dir, 'format': VggFace2Extractor.NAME, }] elif osp.isfile(path): if (osp.basename(path).startswith(VggFace2Path.LANDMARKS_FILE) or \ osp.basename(path).startswith(VggFace2Path.BBOXES_FILE)) and \ path.endswith('.csv'): return [{ 'url': path, 'format': VggFace2Extractor.NAME, }] return [] class VggFace2Converter(Converter): DEFAULT_IMAGE_EXT = VggFace2Path.IMAGE_EXT def apply(self): def _get_name_id(item_parts, label_name): if 1 < len(item_parts) and item_parts[0] == label_name: return '/'.join([label_name, *item_parts[1:]]) else: return '/'.join([label_name, *item_parts]) save_dir = self._save_dir os.makedirs(save_dir, exist_ok=True) labels_path = osp.join(save_dir, VggFace2Path.LABELS_FILE) labels_file = '' for label in self._extractor.categories()[AnnotationType.label]: labels_file += '%s' % label.name if label.parent: labels_file += ' %s' % label.parent labels_file += '\n' with open(labels_path, 'w', encoding='utf-8') as f: f.write(labels_file) label_categories = self._extractor.categories()[AnnotationType.label] for subset_name, subset in self._extractor.subsets().items(): bboxes_table = [] landmarks_table = [] for item in subset: item_parts = item.id.split('/') if item.has_image and self._save_images: labels = set(p.label for p in item.annotations if 
getattr(p, 'label') != None) if labels: for label in labels: image_dir = label_categories[label].name if 1 < len(item_parts) and image_dir == item_parts[0]: image_dir = '' self._save_image(item, subdir=osp.join(subset_name, image_dir)) else: image_dir = VggFace2Path.IMAGES_DIR_NO_LABEL if 1 < len(item_parts) and image_dir == item_parts[0]: image_dir = '' self._save_image(item, subdir=osp.join(subset_name, image_dir)) landmarks = [a for a in item.annotations if a.type == AnnotationType.points] if 1 < len(landmarks): raise Exception("Item (%s, %s): an image can have only one " "set of landmarks" % (item.id, item.subset)) if landmarks: if landmarks[0].label is not None and \ label_categories[landmarks[0].label].name: name_id = _get_name_id(item_parts, label_categories[landmarks[0].label].name) else: name_id = _get_name_id(item_parts, VggFace2Path.IMAGES_DIR_NO_LABEL) points = landmarks[0].points if len(points) != 10: landmarks_table.append({'NAME_ID': name_id}) else: landmarks_table.append({'NAME_ID': name_id, 'P1X': points[0], 'P1Y': points[1], 'P2X': points[2], 'P2Y': points[3], 'P3X': points[4], 'P3Y': points[5], 'P4X': points[6], 'P4Y': points[7], 'P5X': points[8], 'P5Y': points[9]}) bboxes = [a for a in item.annotations if a.type == AnnotationType.bbox] if 1 < len(bboxes): raise Exception("Item (%s, %s): an image can have only one " "bbox" % (item.id, item.subset)) if bboxes: if bboxes[0].label is not None and \ label_categories[bboxes[0].label].name: name_id = _get_name_id(item_parts, label_categories[bboxes[0].label].name) else: name_id = _get_name_id(item_parts, VggFace2Path.IMAGES_DIR_NO_LABEL) bboxes_table.append({'NAME_ID': name_id, 'X': bboxes[0].x, 'Y': bboxes[0].y, 'W': bboxes[0].w, 'H': bboxes[0].h}) labels = [a for a in item.annotations if a.type == AnnotationType.label] for label in labels: if label.label is not None and \ label_categories[label.label].name: name_id = _get_name_id(item_parts, label_categories[labels[0].label].name) else: name_id = 
_get_name_id(item_parts, VggFace2Path.IMAGES_DIR_NO_LABEL) landmarks_table.append({'NAME_ID': name_id}) if not landmarks and not bboxes and not labels: landmarks_table.append({'NAME_ID': _get_name_id(item_parts, VggFace2Path.IMAGES_DIR_NO_LABEL)}) landmarks_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR, VggFace2Path.LANDMARKS_FILE + subset_name + '.csv') os.makedirs(osp.dirname(landmarks_path), exist_ok=True) with open(landmarks_path, 'w', encoding='utf-8', newline='') as file: columns = ['NAME_ID', 'P1X', 'P1Y', 'P2X', 'P2Y', 'P3X', 'P3Y', 'P4X', 'P4Y', 'P5X', 'P5Y'] writer = csv.DictWriter(file, fieldnames=columns) writer.writeheader() writer.writerows(landmarks_table) if bboxes_table: bboxes_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR, VggFace2Path.BBOXES_FILE + subset_name + '.csv') os.makedirs(osp.dirname(bboxes_path), exist_ok=True) with open(bboxes_path, 'w', encoding='utf-8', newline='') as file: columns = ['NAME_ID', 'X', 'Y', 'W', 'H'] writer = csv.DictWriter(file, fieldnames=columns) writer.writeheader() writer.writerows(bboxes_table)
datumaro/plugins/vgg_face2_format.py
13,023
Copyright (C) 2020-2021 Intel Corporation SPDX-License-Identifier: MIT
70
de
0.432609
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse from mo.graph.graph import Graph from mo.pipeline.common import get_ir_version from mo.utils import class_registration def unified_pipeline(argv: argparse.Namespace): graph = Graph(cmd_params=argv, name=argv.model_name, ir_version=get_ir_version(argv)) class_registration.apply_replacements(graph, [ class_registration.ClassType.LOADER, class_registration.ClassType.FRONT_REPLACER, class_registration.ClassType.MIDDLE_REPLACER, class_registration.ClassType.BACK_REPLACER ]) return graph
model-optimizer/mo/pipeline/unified.py
637
Copyright (C) 2018-2021 Intel Corporation SPDX-License-Identifier: Apache-2.0
77
en
0.245491
from pyFilter.py_filter import PyFilter if __name__ == "__main__": p = PyFilter() try: p.run() except KeyboardInterrupt: print("\nClosing PyFilter") finally: p.make_persistent(loop=False) # Save any outstanding bans without the constant loop if p.settings["database"] == "sqlite": p.database_connection.sqlite_connection.close() print("Closed sqlite connection")
run.py
437
Save any outstanding bans without the constant loop
51
en
0.741078
import codecs import json from tqdm import tqdm import copy submit_result2 = [] with codecs.open('dialog_chinese-macbert.txt', mode='r', encoding='utf8') as f: reader = f.readlines(f) data_list = [] for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)): dialogue_ = json.loads(dialogue_) submit_result2.append(dialogue_) submit_result4 = [] with codecs.open('macbert2-f-f.txt', mode='r', encoding='utf8') as f: reader = f.readlines(f) data_list = [] for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)): dialogue_ = json.loads(dialogue_) submit_result4.append(dialogue_) submit_result3 = [] with codecs.open('macbert2-f.txt', mode='r', encoding='utf8') as f: reader = f.readlines(f) data_list = [] for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)): dialogue_ = json.loads(dialogue_) submit_result3.append(dialogue_) submit_result5 = [] with codecs.open('mcbert.txt', mode='r', encoding='utf8') as f: reader = f.readlines(f) data_list = [] for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)): dialogue_ = json.loads(dialogue_) submit_result5.append(dialogue_) submit_result6 = [] with codecs.open('medbert.txt', mode='r', encoding='utf8') as f: reader = f.readlines(f) data_list = [] for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)): dialogue_ = json.loads(dialogue_) submit_result6.append(dialogue_) submit_result = [] with codecs.open('macbert2-f.txt', mode='r', encoding='utf8') as f: reader = f.readlines(f) data_list = [] for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)): dialogue_ = json.loads(dialogue_) for content_idx_, contents_ in enumerate(dialogue_['dialog_info']): terms_ = contents_['ner'] if len(terms_) != 0: idx_ = 0 for _ner_idx, term_ in enumerate(terms_): if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性' and dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']: dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] 
= submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性' and dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']: dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']: if submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注': dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性': if submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他': dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']: if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性': if submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注': dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']: if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性': if submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == 
'不标注': dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] # elif submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他': # dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']: if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性': if submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他': dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] submit_result.append(dialogue_) with open('./result.txt', 'w', encoding='utf-8') as output_data: for json_content in submit_result: output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
predict/ensemble.py
6,187
elif submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他': dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
275
en
0.103931
import pandas as pd import numpy as np df = pd.DataFrame(np.random.randn(10,3),columns=['a','b','c'],index=list('abcdefghij')) print(df) df.ix[::2,0] = np.nan; df.ix[::4,1] = np.nan; df.ix[::3,2] = np.nan; df = df.dropna(subset=['a','b']) #mid delete rows where df['htm3']==na bins = np.arange(-3,3,0.1) bins = [-100,0,100] indices = np.digitize(df.a,bins) ''' bins代表若干连续的区间0:[-1,2),1:[2,7),2:[7,9),3:[9,10),用数组表示为:[-1,2,7,9,10] np.digitize()函数生成一列数,对应位置的值表示参数一对应值在bins中所属区段的编号。 ''' groups = df.groupby(indices) print('#'*20) for i,group in groups: print(i,len(group)) print(group) print('#'*20) print(groups.mean())
BasicOperations/05_Pandas/05_Pandas_02_groupby.py
726
mid delete rows where df['htm3']==na
36
en
0.675062
"""All functions return a Component so you can easily pipe or compose them. There are two types of functions: - decorators: return the original component - containers: return a new component """ from functools import lru_cache import numpy as np from omegaconf import OmegaConf from pydantic import validate_arguments from gdsfactory.cell import cell from gdsfactory.component import Component from gdsfactory.components.text_rectangular import text_rectangular_multi_layer from gdsfactory.functools_ import partial from gdsfactory.port import auto_rename_ports from gdsfactory.types import ( Anchor, Axis, ComponentFactory, ComponentOrFactory, Float2, Layer, List, Optional, Strs, ) cache = lru_cache(maxsize=None) def add_port(component: Component, **kwargs) -> Component: """Return Component with a new port.""" component.add_port(**kwargs) return component @cell def add_text( component: ComponentOrFactory, text: str = "", text_offset: Float2 = (0, 0), text_anchor: Anchor = "cc", text_factory: ComponentFactory = text_rectangular_multi_layer, ) -> Component: """Return component inside a new component with text geometry. Args: component: text: text string. text_offset: relative to component anchor. Defaults to center (cc). text_anchor: relative to component (ce cw nc ne nw sc se sw center cc). text_factory: function to add text labels. """ component = component() if callable(component) else component component_new = Component() component_new.component = component ref = component_new.add_ref(component) t = component_new << text_factory(text) t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor))) component_new.add_ports(ref.ports) component_new.copy_child_info(component) return component_new def add_texts( components: List[ComponentOrFactory], prefix: str = "", index0: int = 0, **kwargs, ) -> List[Component]: """Return a list of Component with text labels. 
Args: components: list of components prefix: Optional prefix for the labels index0: defaults to 0 (0, for first component, 1 for second ...) keyword Args: text_offset: relative to component size info anchor. Defaults to center. text_anchor: relative to component (ce cw nc ne nw sc se sw center cc). text_factory: function to add text labels. """ return [ add_text(component, text=f"{prefix}{i+index0}", **kwargs) for i, component in enumerate(components) ] @cell def rotate( component: ComponentOrFactory, angle: float = 90, ) -> Component: """Return rotated component inside a new component. Most times you just need to place a reference and rotate it. This rotate function just encapsulates the rotated reference into a new component. Args: component: angle: in degrees """ component = component() if callable(component) else component component_new = Component() component_new.component = component ref = component_new.add_ref(component) ref.rotate(angle) component_new.add_ports(ref.ports) component_new.copy_child_info(component) return component_new rotate90 = partial(rotate, angle=90) rotate90n = partial(rotate, angle=-90) rotate180 = partial(rotate, angle=180) @cell def mirror(component: Component, p1: Float2 = (0, 1), p2: Float2 = (0, 0)) -> Component: """Return new Component with a mirrored reference. Args: p1: first point to define mirror axis p2: second point to define mirror axis """ component_new = Component() component_new.component = component ref = component_new.add_ref(component) ref.mirror(p1=p1, p2=p2) component_new.add_ports(ref.ports) component_new.copy_child_info(component) return component_new @cell def move( component: Component, origin=(0, 0), destination=None, axis: Optional[Axis] = None, ) -> Component: """Return new Component with a moved reference to the original component. 
Args: origin: of component destination: axis: x or y axis """ component_new = Component() component_new.component = component ref = component_new.add_ref(component) ref.move(origin=origin, destination=destination, axis=axis) component_new.add_ports(ref.ports) component_new.copy_child_info(component) return component_new def move_port_to_zero(component: Component, port_name: str = "o1"): """Return a container that contains a reference to the original component. where the new component has port_name in (0, 0) """ if port_name not in component.ports: raise ValueError( f"port_name = {port_name!r} not in {list(component.ports.keys())}" ) return move(component, -component.ports[port_name].midpoint) def update_info(component: Component, **kwargs) -> Component: """Return Component with updated info.""" component.info.update(**kwargs) return component @validate_arguments def add_settings_label( component: Component, layer_label: Layer = (66, 0), settings: Optional[Strs] = None ) -> Component: """Add a settings label to a component. Args: component: layer_label: settings: tuple or list of settings. 
if None, adds all changed settings """ d = ( {setting: component.get_setting(setting) for setting in settings} if settings else component.info.changed ) component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label) return component __all__ = ( "add_port", "add_text", "add_settings_label", "auto_rename_ports", "cache", "mirror", "move", "move_port_to_zero", "rotate", "update_info", ) if __name__ == "__main__": import gdsfactory as gf c = gf.components.mmi1x2( length_mmi=10, decorator=gf.partial(add_settings_label, settings=["name", "length_mmi"]), ) # c.show() cr = c.rotate() cr.pprint() cr.show() # cm = move(c, destination=(20, 20)) # cm.show() # cm = mirror(c) # cm.show() # cm = c.mirror() # cm.show() # cm2 = move_port_to_zero(cm) # cm2.show() # cm3 = add_text(c, "hi") # cm3.show() # cr = rotate(component=c) # cr.show() # print(component_rotated) # component_rotated.pprint # component_netlist = component.get_netlist() # component.pprint_netlist()
gdsfactory/functions.py
6,594
Return Component with a new port. Add a settings label to a component. Args: component: layer_label: settings: tuple or list of settings. if None, adds all changed settings Return component inside a new component with text geometry. Args: component: text: text string. text_offset: relative to component anchor. Defaults to center (cc). text_anchor: relative to component (ce cw nc ne nw sc se sw center cc). text_factory: function to add text labels. Return a list of Component with text labels. Args: components: list of components prefix: Optional prefix for the labels index0: defaults to 0 (0, for first component, 1 for second ...) keyword Args: text_offset: relative to component size info anchor. Defaults to center. text_anchor: relative to component (ce cw nc ne nw sc se sw center cc). text_factory: function to add text labels. Return new Component with a mirrored reference. Args: p1: first point to define mirror axis p2: second point to define mirror axis Return new Component with a moved reference to the original component. Args: origin: of component destination: axis: x or y axis Return a container that contains a reference to the original component. where the new component has port_name in (0, 0) Return rotated component inside a new component. Most times you just need to place a reference and rotate it. This rotate function just encapsulates the rotated reference into a new component. Args: component: angle: in degrees Return Component with updated info. All functions return a Component so you can easily pipe or compose them. 
There are two types of functions: - decorators: return the original component - containers: return a new component c.show() cm = move(c, destination=(20, 20)) cm.show() cm = mirror(c) cm.show() cm = c.mirror() cm.show() cm2 = move_port_to_zero(cm) cm2.show() cm3 = add_text(c, "hi") cm3.show() cr = rotate(component=c) cr.show() print(component_rotated) component_rotated.pprint component_netlist = component.get_netlist() component.pprint_netlist()
2,102
en
0.625287
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 2019/2/19 11:06 # @User : zhunishengrikuaile # @File : TrainTicket.py # @Email : binary@shujian.org # @MyBlog : WWW.SHUJIAN.ORG # @NetName : 書劍 # @Software: 百度识图Api封装 # 火车票识别 import os import base64 import requests from bin.AccessToken.AccessToken import AccessToken from config.config import LOCALHOST_PATH, URL_LIST_URL ACCESS_TOKEN = AccessToken().getToken()['access_token'] TRAIN_TICKET_URL = URL_LIST_URL['TRAIN_TICKET'] + '?access_token={}'.format(ACCESS_TOKEN) class TrainTicketSuper(object): pass class TrainTicket(TrainTicketSuper): ''' 异步接口获取ID @image ''' def __init__(self, image=None): self.HEADER = { 'Content-Type': 'application/x-www-form-urlencoded', } self.IMAGE_CONFIG = { } if image is not None: imagepath = os.path.exists(LOCALHOST_PATH['PATH'] + image) if imagepath == True: images = LOCALHOST_PATH['PATH'] + image with open(images, 'rb') as images: self.IMAGE_CONFIG['image'] = base64.b64encode(images.read()) def postTrainTicket(self): if self.IMAGE_CONFIG.get('image', None) == None: return 'image参数不能为空!' trainTicket = requests.post(url=TRAIN_TICKET_URL, headers=self.HEADER, data=self.IMAGE_CONFIG) return trainTicket.json()
utils/BaiduTextApi/BaiduTextApi/bin/TrainTicket/TrainTicket.py
1,509
异步接口获取ID @image !/usr/bin/env python3 -*- coding: utf-8 -*- @Time : 2019/2/19 11:06 @User : zhunishengrikuaile @File : TrainTicket.py @Email : binary@shujian.org @MyBlog : WWW.SHUJIAN.ORG @NetName : 書劍 @Software: 百度识图Api封装 火车票识别
241
fr
0.180231
# Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu> # Matti Hamalainen <msh@nmr.mgh.harvard.edu> # # License: BSD (3-clause) from warnings import warn from copy import deepcopy import os.path as op import numpy as np from scipy import linalg from ..externals.six import BytesIO from datetime import datetime as dt from .open import fiff_open from .tree import dir_tree_find, copy_tree from .constants import FIFF from .tag import read_tag from .proj import read_proj, write_proj from .ctf import read_ctf_comp, write_ctf_comp from .channels import read_bad_channels from .write import (start_file, end_file, start_block, end_block, write_string, write_dig_point, write_float, write_int, write_coord_trans, write_ch_info, write_name_list, write_julian) from ..utils import logger, verbose def _summarize_str(st): """Aux function""" return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...' class Info(dict): """ Info class to nicely represent info dicts """ def __repr__(self): """Summarize info instead of printing all""" strs = ['<Info | %s non-empty fields'] non_empty = 0 for k, v in self.items(): if k in ['bads', 'ch_names']: entr = (', '.join(b for ii, b in enumerate(v) if ii < 10) if v else '0 items') if len(entr) >= 56: # get rid of of half printed ch names entr = _summarize_str(entr) elif k == 'filename' and v: path, fname = op.split(v) entr = path[:10] + '.../' + fname elif k == 'projs' and v: entr = ', '.join(p['desc'] + ': o%s' % {0: 'ff', 1: 'n'}[p['active']] for p in v) if len(entr) >= 56: entr = _summarize_str(entr) elif k == 'meas_date' and np.iterable(v): # first entire in meas_date is meaningful entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S') else: this_len = (len(v) if hasattr(v, '__len__') else ('%s' % v if v is not None else None)) entr = (('%d items' % this_len) if isinstance(this_len, int) else ('%s' % this_len if this_len else '')) if entr: non_empty += 1 entr = ' | ' + entr strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr)) 
strs_non_empty = sorted(s for s in strs if '|' in s) strs_empty = sorted(s for s in strs if '|' not in s) st = '\n '.join(strs_non_empty + strs_empty) st += '\n>' st %= non_empty return st def _anonymize(self): if self.get('subject_info') is not None: del self['subject_info'] def read_fiducials(fname): """Read fiducials from a fiff file Returns ------- pts : list of dicts List of digitizer points (each point in a dict). coord_frame : int The coordinate frame of the points (one of mne.fiff.FIFF.FIFFV_COORD_...) """ fid, tree, _ = fiff_open(fname) with fid: isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK) isotrak = isotrak[0] pts = [] coord_frame = FIFF.FIFFV_COORD_UNKNOWN for k in range(isotrak['nent']): kind = isotrak['directory'][k].kind pos = isotrak['directory'][k].pos if kind == FIFF.FIFF_DIG_POINT: tag = read_tag(fid, pos) pts.append(tag.data) elif kind == FIFF.FIFF_MNE_COORD_FRAME: tag = read_tag(fid, pos) coord_frame = tag.data[0] if coord_frame == FIFF.FIFFV_COORD_UNKNOWN: err = ("No coordinate frame was found in the file %r, it is probably " "not a valid fiducials file." % fname) raise ValueError(err) # coord_frame is not stored in the tag for pt in pts: pt['coord_frame'] = coord_frame return pts, coord_frame def write_fiducials(fname, pts, coord_frame=0): """Write fiducials to a fiff file Parameters ---------- fname : str Destination file name. pts : iterator of dict Iterator through digitizer points. Each point is a dictionary with the keys 'kind', 'ident' and 'r'. coord_frame : int The coordinate frame of the points (one of mne.fiff.FIFF.FIFFV_COORD_...) """ pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts)) bad_frames = pts_frames - set((coord_frame,)) if len(bad_frames) > 0: err = ("Points have coord_frame entries that are incompatible with " "coord_frame=%i: %s." 
% (coord_frame, str(tuple(bad_frames)))) raise ValueError(err) fid = start_file(fname) start_block(fid, FIFF.FIFFB_ISOTRAK) write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame) for pt in pts: write_dig_point(fid, pt) end_block(fid, FIFF.FIFFB_ISOTRAK) end_file(fid) @verbose def read_info(fname, verbose=None): """Read measurement info from a file Parameters ---------- fname : str File name. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- info : instance of mne.fiff.meas_info.Info Info on dataset. """ f, tree, _ = fiff_open(fname) with f as fid: info = read_meas_info(fid, tree)[0] return info @verbose def read_meas_info(fid, tree, verbose=None): """Read the measurement info Parameters ---------- fid : file Open file descriptor. tree : tree FIF tree structure. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- info : instance of mne.fiff.meas_info.Info Info on dataset. meas : dict Node in tree that contains the info. 
""" # Find the desired blocks meas = dir_tree_find(tree, FIFF.FIFFB_MEAS) if len(meas) == 0: raise ValueError('Could not find measurement data') if len(meas) > 1: raise ValueError('Cannot read more that 1 measurement data') meas = meas[0] meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO) if len(meas_info) == 0: raise ValueError('Could not find measurement info') if len(meas_info) > 1: raise ValueError('Cannot read more that 1 measurement info') meas_info = meas_info[0] # Read measurement info dev_head_t = None ctf_head_t = None meas_date = None highpass = None lowpass = None nchan = None sfreq = None chs = [] experimenter = None description = None proj_id = None proj_name = None line_freq = None p = 0 for k in range(meas_info['nent']): kind = meas_info['directory'][k].kind pos = meas_info['directory'][k].pos if kind == FIFF.FIFF_NCHAN: tag = read_tag(fid, pos) nchan = int(tag.data) elif kind == FIFF.FIFF_SFREQ: tag = read_tag(fid, pos) sfreq = float(tag.data) elif kind == FIFF.FIFF_CH_INFO: tag = read_tag(fid, pos) chs.append(tag.data) p += 1 elif kind == FIFF.FIFF_LOWPASS: tag = read_tag(fid, pos) lowpass = float(tag.data) elif kind == FIFF.FIFF_HIGHPASS: tag = read_tag(fid, pos) highpass = float(tag.data) elif kind == FIFF.FIFF_MEAS_DATE: tag = read_tag(fid, pos) meas_date = tag.data elif kind == FIFF.FIFF_COORD_TRANS: tag = read_tag(fid, pos) cand = tag.data if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \ cand['to'] == FIFF.FIFFV_COORD_HEAD: dev_head_t = cand elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \ cand['to'] == FIFF.FIFFV_COORD_HEAD: ctf_head_t = cand elif kind == FIFF.FIFF_EXPERIMENTER: tag = read_tag(fid, pos) experimenter = tag.data elif kind == FIFF.FIFF_DESCRIPTION: tag = read_tag(fid, pos) description = tag.data elif kind == FIFF.FIFF_PROJ_ID: tag = read_tag(fid, pos) proj_id = tag.data elif kind == FIFF.FIFF_PROJ_NAME: tag = read_tag(fid, pos) proj_name = tag.data elif kind == FIFF.FIFF_LINE_FREQ: tag = read_tag(fid, pos) line_freq 
= float(tag.data) # Check that we have everything we need if nchan is None: raise ValueError('Number of channels in not defined') if sfreq is None: raise ValueError('Sampling frequency is not defined') if len(chs) == 0: raise ValueError('Channel information not defined') if len(chs) != nchan: raise ValueError('Incorrect number of channel definitions found') if dev_head_t is None or ctf_head_t is None: hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) if len(hpi_result) == 1: hpi_result = hpi_result[0] for k in range(hpi_result['nent']): kind = hpi_result['directory'][k].kind pos = hpi_result['directory'][k].pos if kind == FIFF.FIFF_COORD_TRANS: tag = read_tag(fid, pos) cand = tag.data if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \ cand['to'] == FIFF.FIFFV_COORD_HEAD: dev_head_t = cand elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \ cand['to'] == FIFF.FIFFV_COORD_HEAD: ctf_head_t = cand # Locate the Polhemus data isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK) dig = None if len(isotrak) == 0: logger.info('Isotrak not found') elif len(isotrak) > 1: warn('Multiple Isotrak found') else: isotrak = isotrak[0] dig = [] for k in range(isotrak['nent']): kind = isotrak['directory'][k].kind pos = isotrak['directory'][k].pos if kind == FIFF.FIFF_DIG_POINT: tag = read_tag(fid, pos) dig.append(tag.data) dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD # Locate the acquisition information acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS) acq_pars = None acq_stim = None if len(acqpars) == 1: acqpars = acqpars[0] for k in range(acqpars['nent']): kind = acqpars['directory'][k].kind pos = acqpars['directory'][k].pos if kind == FIFF.FIFF_DACQ_PARS: tag = read_tag(fid, pos) acq_pars = tag.data elif kind == FIFF.FIFF_DACQ_STIM: tag = read_tag(fid, pos) acq_stim = tag.data # Load the SSP data projs = read_proj(fid, meas_info) # Load the CTF compensation data comps = read_ctf_comp(fid, meas_info, chs) # Load the bad channel list bads = 
read_bad_channels(fid, meas_info) # # Put the data together # if tree['id'] is not None: info = Info(file_id=tree['id']) else: info = Info(file_id=None) subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT) if len(subject_info) == 1: subject_info = subject_info[0] si = dict() for k in range(subject_info['nent']): kind = subject_info['directory'][k].kind pos = subject_info['directory'][k].pos if kind == FIFF.FIFF_SUBJ_ID: tag = read_tag(fid, pos) si['id'] = int(tag.data) elif kind == FIFF.FIFF_SUBJ_HIS_ID: tag = read_tag(fid, pos) si['his_id'] = str(tag.data) elif kind == FIFF.FIFF_SUBJ_LAST_NAME: tag = read_tag(fid, pos) si['last_name'] = str(tag.data) elif kind == FIFF.FIFF_SUBJ_FIRST_NAME: tag = read_tag(fid, pos) si['first_name'] = str(tag.data) elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY: tag = read_tag(fid, pos) si['birthday'] = tag.data elif kind == FIFF.FIFF_SUBJ_SEX: tag = read_tag(fid, pos) si['sex'] = int(tag.data) elif kind == FIFF.FIFF_SUBJ_HAND: tag = read_tag(fid, pos) si['hand'] = int(tag.data) else: si = None info['subject_info'] = si # Load extra information blocks read_extra_meas_info(fid, tree, info) # Make the most appropriate selection for the measurement id if meas_info['parent_id'] is None: if meas_info['id'] is None: if meas['id'] is None: if meas['parent_id'] is None: info['meas_id'] = info['file_id'] else: info['meas_id'] = meas['parent_id'] else: info['meas_id'] = meas['id'] else: info['meas_id'] = meas_info['id'] else: info['meas_id'] = meas_info['parent_id'] info['experimenter'] = experimenter info['description'] = description info['proj_id'] = proj_id info['proj_name'] = proj_name if meas_date is None: info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']] else: info['meas_date'] = meas_date info['nchan'] = nchan info['sfreq'] = sfreq info['highpass'] = highpass if highpass is not None else 0 info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0 info['line_freq'] = line_freq # Add the channel 
information and make a list of channel names # for convenience info['chs'] = chs info['ch_names'] = [ch['ch_name'] for ch in chs] # # Add the coordinate transformations # info['dev_head_t'] = dev_head_t info['ctf_head_t'] = ctf_head_t if dev_head_t is not None and ctf_head_t is not None: head_ctf_trans = linalg.inv(ctf_head_t['trans']) dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans']) info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE, 'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD, 'trans': dev_ctf_trans} else: info['dev_ctf_t'] = None # All kinds of auxliary stuff info['dig'] = dig info['bads'] = bads info['projs'] = projs info['comps'] = comps info['acq_pars'] = acq_pars info['acq_stim'] = acq_stim return info, meas def read_extra_meas_info(fid, tree, info): """Read extra blocks from fid""" # current method saves them into a BytesIO file instance for simplicity # this and its partner, write_extra_meas_info, could be made more # comprehensive (i.e.., actually parse and read the data instead of # just storing it for later) blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS, FIFF.FIFFB_PROCESSING_HISTORY] info['orig_blocks'] = blocks fid_str = BytesIO() fid_str = start_file(fid_str) start_block(fid_str, FIFF.FIFFB_MEAS_INFO) for block in blocks: nodes = dir_tree_find(tree, block) copy_tree(fid, tree['id'], nodes, fid_str) info['orig_fid_str'] = fid_str def write_extra_meas_info(fid, info): """Write otherwise left out blocks of data""" # uses BytesIO fake file to read the appropriate blocks if 'orig_blocks' in info and info['orig_blocks'] is not None: # Blocks from the original blocks = info['orig_blocks'] fid_str, tree, _ = fiff_open(info['orig_fid_str']) for block in blocks: nodes = dir_tree_find(tree, block) copy_tree(fid_str, tree['id'], nodes, fid) def write_meas_info(fid, info, data_type=None, reset_range=True): """Write measurement info into a file id (from a fif file) Parameters ---------- fid : file Open file descriptor info : 
instance of mne.fiff.meas_info.Info The measurement info structure data_type : int The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for raw data. reset_range : bool If True, info['chs'][k]['range'] will be set to unity. Note ---- Tags are written in a particular order for compatibility with maxfilter """ # Measurement info start_block(fid, FIFF.FIFFB_MEAS_INFO) # Extra measurement info write_extra_meas_info(fid, info) # Polhemus data if info['dig'] is not None: start_block(fid, FIFF.FIFFB_ISOTRAK) for d in info['dig']: write_dig_point(fid, d) end_block(fid, FIFF.FIFFB_ISOTRAK) # megacq parameters if info['acq_pars'] is not None or info['acq_stim'] is not None: start_block(fid, FIFF.FIFFB_DACQ_PARS) if info['acq_pars'] is not None: write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars']) if info['acq_stim'] is not None: write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim']) end_block(fid, FIFF.FIFFB_DACQ_PARS) # Coordinate transformations if the HPI result block was not there if info['dev_head_t'] is not None: write_coord_trans(fid, info['dev_head_t']) if info['ctf_head_t'] is not None: write_coord_trans(fid, info['ctf_head_t']) # Projectors write_proj(fid, info['projs']) # CTF compensation info write_ctf_comp(fid, info['comps']) # Bad channels if len(info['bads']) > 0: start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads']) end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) # General if info.get('experimenter') is not None: write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter']) if info.get('description') is not None: write_string(fid, FIFF.FIFF_DESCRIPTION, info['description']) if info.get('proj_id') is not None: write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id']) if info.get('proj_name') is not None: write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name']) if info.get('meas_date') is not None: write_int(fid, FIFF.FIFF_MEAS_DATE, 
info['meas_date']) write_int(fid, FIFF.FIFF_NCHAN, info['nchan']) write_float(fid, FIFF.FIFF_SFREQ, info['sfreq']) write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass']) write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass']) if info.get('line_freq') is not None: write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq']) if data_type is not None: write_int(fid, FIFF.FIFF_DATA_PACK, data_type) # Channel information for k, c in enumerate(info['chs']): # Scan numbers may have been messed up c = deepcopy(c) c['scanno'] = k + 1 # for float/double, the "range" param is unnecessary if reset_range is True: c['range'] = 1.0 write_ch_info(fid, c) # Subject information if info.get('subject_info') is not None: start_block(fid, FIFF.FIFFB_SUBJECT) si = info['subject_info'] if si.get('id') is not None: write_int(fid, FIFF.FIFF_SUBJ_ID, si['id']) if si.get('his_id') is not None: write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id']) if si.get('last_name') is not None: write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name']) if si.get('first_name') is not None: write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name']) if si.get('birthday') is not None: write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday']) if si.get('sex') is not None: write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex']) if si.get('hand') is not None: write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand']) end_block(fid, FIFF.FIFFB_SUBJECT) end_block(fid, FIFF.FIFFB_MEAS_INFO) def write_info(fname, info, data_type=None, reset_range=True): """Write measurement info in fif file. Parameters ---------- fname : str The name of the file. Should end by -info.fif. info : instance of mne.fiff.meas_info.Info The measurement info structure data_type : int The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for raw data. reset_range : bool If True, info['chs'][k]['range'] will be set to unity. 
""" fid = start_file(fname) start_block(fid, FIFF.FIFFB_MEAS) write_meas_info(fid, info, data_type, reset_range) end_block(fid, FIFF.FIFFB_MEAS) end_file(fid)
mne/fiff/meas_info.py
21,274
Info class to nicely represent info dicts Summarize info instead of printing all Aux function Read extra blocks from fid Read fiducials from a fiff file Returns ------- pts : list of dicts List of digitizer points (each point in a dict). coord_frame : int The coordinate frame of the points (one of mne.fiff.FIFF.FIFFV_COORD_...) Read measurement info from a file Parameters ---------- fname : str File name. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- info : instance of mne.fiff.meas_info.Info Info on dataset. Read the measurement info Parameters ---------- fid : file Open file descriptor. tree : tree FIF tree structure. verbose : bool, str, int, or None If not None, override default verbose level (see mne.verbose). Returns ------- info : instance of mne.fiff.meas_info.Info Info on dataset. meas : dict Node in tree that contains the info. Write otherwise left out blocks of data Write fiducials to a fiff file Parameters ---------- fname : str Destination file name. pts : iterator of dict Iterator through digitizer points. Each point is a dictionary with the keys 'kind', 'ident' and 'r'. coord_frame : int The coordinate frame of the points (one of mne.fiff.FIFF.FIFFV_COORD_...) Write measurement info in fif file. Parameters ---------- fname : str The name of the file. Should end by -info.fif. info : instance of mne.fiff.meas_info.Info The measurement info structure data_type : int The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for raw data. reset_range : bool If True, info['chs'][k]['range'] will be set to unity. Write measurement info into a file id (from a fif file) Parameters ---------- fid : file Open file descriptor info : instance of mne.fiff.meas_info.Info The measurement info structure data_type : int The data_type in case it is necessary. 
Should be 4 (FIFFT_FLOAT), 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for raw data. reset_range : bool If True, info['chs'][k]['range'] will be set to unity. Note ---- Tags are written in a particular order for compatibility with maxfilter Authors: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu> Matti Hamalainen <msh@nmr.mgh.harvard.edu> License: BSD (3-clause) get rid of of half printed ch names first entire in meas_date is meaningful coord_frame is not stored in the tag Find the desired blocks Read measurement info Check that we have everything we need Locate the Polhemus data Locate the acquisition information Load the SSP data Load the CTF compensation data Load the bad channel list Put the data together Load extra information blocks Make the most appropriate selection for the measurement id Add the channel information and make a list of channel names for convenience Add the coordinate transformations All kinds of auxliary stuff current method saves them into a BytesIO file instance for simplicity this and its partner, write_extra_meas_info, could be made more comprehensive (i.e.., actually parse and read the data instead of just storing it for later) uses BytesIO fake file to read the appropriate blocks Blocks from the original Measurement info Extra measurement info Polhemus data megacq parameters Coordinate transformations if the HPI result block was not there Projectors CTF compensation info Bad channels General Channel information Scan numbers may have been messed up for float/double, the "range" param is unnecessary Subject information
3,685
en
0.613813
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, division import numpy as np import tensorflow as tf from niftynet.layer.activation import ActiLayer from niftynet.layer.convolution import ConvolutionalLayer from niftynet.layer.deconvolution import DeconvolutionalLayer from niftynet.layer.fully_connected import FullyConnectedLayer from niftynet.layer.gan_blocks import BaseDiscriminator from niftynet.layer.gan_blocks import BaseGenerator from niftynet.layer.gan_blocks import GANImageBlock class SimulatorGAN(GANImageBlock): """ implementation of Hu et al., "Freehand Ultrasound Image Simulation with Spatially-Conditioned Generative Adversarial Networks", MICCAI RAMBO 2017 https://arxiv.org/abs/1707.05392 """ def __init__(self, name='simulator_GAN'): super(SimulatorGAN, self).__init__( generator=ImageGenerator(name='generator'), discriminator=ImageDiscriminator(name='discriminator'), clip=None, name=name) class ImageGenerator(BaseGenerator): def __init__(self, name): super(ImageGenerator, self).__init__(name=name) self.initializers = {'w': tf.random_normal_initializer(0, 0.02), 'b': tf.constant_initializer(0.001)} self.noise_channels_per_layer = 0 self.with_conditionings = [True, True, True, True, False] def layer_op(self, random_source, image_size, conditioning, is_training): keep_prob_ph = 1 # not passed in as a placeholder add_noise = self.noise_channels_per_layer if conditioning is not None: conditioning_channels = conditioning.shape.as_list()[-1] conditioning_channels = conditioning_channels + add_noise else: conditioning_channels = add_noise # feature channels design pattern ch = [512] sz = image_size[:-1] for i in range(4): # compute output n_feature_channels of i-th layer new_ch = ch[-1] + conditioning_channels * self.with_conditionings[i] new_ch = round(new_ch / 2) ch.append(new_ch) # compute output spatial size of i-th layer sz = [int(round(spatial_len / 2)) for spatial_len in sz] ch.append(1) # last layer single channel image # 
resizing utilities spatial_rank = len(image_size) - 1 if spatial_rank == 3: def resize_func(x, sz): sz_x = x.shape.as_list() r1 = tf.image.resize_images( tf.reshape(x, sz_x[:3] + [-1]), sz[0:2]) r2 = tf.image.resize_images( tf.reshape(r1, [sz_x[0], sz[0] * sz[1], sz_x[3], -1]), [sz[0] * sz[1], sz[2]]) resized_3d = tf.reshape(r2, [sz_x[0]] + sz + [sz_x[-1]]) return resized_3d elif spatial_rank == 2: resize_func = tf.image.resize_bilinear def concat_cond(x, with_conditioning): noise = [] if add_noise: feature_shape = x.shape.as_list()[0:-1] noise_shape = feature_shape + [add_noise] noise = [tf.random_normal(noise_shape, 0.0, 0.1)] if with_conditioning and conditioning is not None: with tf.name_scope('concat_conditioning'): spatial_shape = x.shape.as_list()[1:-1] resized_cond = resize_func(conditioning, spatial_shape) return tf.concat([x, resized_cond] + noise, axis=-1) return x def conv(ch, x): with tf.name_scope('conv'): conv_layer = ConvolutionalLayer( n_output_chns=ch, kernel_size=3, feature_normalization='batch', with_bias=False, acti_func='relu', w_initializer=self.initializers['w']) return conv_layer(x, is_training=is_training) def up(ch, x): with tf.name_scope('up'): deconv_layer = DeconvolutionalLayer( n_output_chns=ch, kernel_size=3, stride=2, feature_normalization='batch', with_bias=False, acti_func='relu', w_initializer=self.initializers['w']) return deconv_layer(x, is_training=is_training) def up_block(ch, x, with_conditioning): with tf.name_scope('up_block'): u = up(ch, x) cond = concat_cond(u, with_conditioning) return conv(cond.shape.as_list()[-1], cond) def noise_to_image(sz, ch, rand_tensor, with_conditioning): batch_size = rand_tensor.shape.as_list()[0] output_shape = [batch_size] + sz + [ch] with tf.name_scope('noise_to_image'): g_no_0 = np.prod(sz) * ch fc_layer = FullyConnectedLayer( n_output_chns=g_no_0, feature_normalization=None, with_bias=True, w_initializer=self.initializers['w'], b_initializer=self.initializers['b']) g_h1p = 
fc_layer(rand_tensor, keep_prob=keep_prob_ph) g_h1p = tf.reshape(g_h1p, output_shape) g_h1p = concat_cond(g_h1p, with_conditioning) return conv(ch + conditioning_channels, g_h1p) def final_image(n_chns, x): with tf.name_scope('final_image'): if add_noise > 0: feature_shape = x.shape.as_list()[0:-1] noise_shape = feature_shape + [add_noise] noise = tf.random_normal(noise_shape, 0, .1) x = tf.concat([x, noise], axis=3) conv_layer = ConvolutionalLayer( n_output_chns=n_chns, kernel_size=3, acti_func='tanh', feature_normalization=None, with_bias=True, w_initializer=self.initializers['w'], b_initializer=self.initializers['b']) x_sample = conv_layer( x, is_training=is_training, keep_prob=keep_prob_ph) return tf.image.resize_images(x_sample, image_size[:-1]) # let the tensors flow... flow = random_source for (idx, chns) in enumerate(ch): if idx == 0: # first layer fully-connected flow = noise_to_image( sz, chns, flow, self.with_conditionings[idx]) elif idx == len(ch) - 1: # final conv without bn return final_image(chns, flow) else: # upsampling block flow = up_block(chns, flow, self.with_conditionings[idx]) class ImageDiscriminator(BaseDiscriminator): def __init__(self, name): super(ImageDiscriminator, self).__init__(name=name) w_init = tf.random_normal_initializer(0, 0.02) b_init = tf.constant_initializer(0.001) # w_init = tf.contrib.layers.variance_scaling_initializer() # b_init = tf.constant_initializer(0) self.initializers = {'w': w_init, 'b': b_init} self.chns = [32, 64, 128, 256, 512, 1024, 1] def layer_op(self, image, conditioning, is_training): batch_size = image.shape.as_list()[0] def down(ch, x): with tf.name_scope('downsample'): conv_layer = ConvolutionalLayer( n_output_chns=ch, kernel_size=3, stride=2, feature_normalization='batch', acti_func='selu', w_initializer=self.initializers['w']) return conv_layer(x, is_training=is_training) def convr(ch, x): conv_layer = ConvolutionalLayer( n_output_chns=ch, kernel_size=3, feature_normalization='batch', 
acti_func='selu', w_initializer=self.initializers['w']) return conv_layer(x, is_training=is_training) def conv(ch, x, s): conv_layer = ConvolutionalLayer( n_output_chns=ch, kernel_size=3, feature_normalization='batch', w_initializer=self.initializers['w']) acti_layer = ActiLayer(func='selu') # combining two flows res_flow = conv_layer(x, is_training=is_training) + s return acti_layer(res_flow) def down_block(ch, x): with tf.name_scope('down_resnet'): s = down(ch, x) r = convr(ch, s) return conv(ch, r, s) def feature_block(ch, image): with tf.name_scope('feature'): conv_layer = ConvolutionalLayer( n_output_chns=ch, kernel_size=5, with_bias=True, feature_normalization=None, acti_func='selu', w_initializer=self.initializers['w'], b_initializer=self.initializers['b']) d_h1s = conv_layer(image, is_training=is_training) d_h1r = convr(ch, d_h1s) return conv(ch, d_h1r, d_h1s) def fully_connected(ch, features): with tf.name_scope('fully_connected'): # with bn? fc_layer = FullyConnectedLayer( n_output_chns=ch, feature_normalization=None, with_bias=True) return fc_layer(features, is_training=is_training) if conditioning is not None: image = tf.concat([image, conditioning], axis=-1) # let the tensors flow... flow = image for (idx, n_chns) in enumerate(self.chns): if idx == 0: # first layer flow = feature_block(n_chns, flow) elif idx == len(self.chns) - 1: # last layer return fully_connected(n_chns, flow) else: flow = down_block(n_chns, flow)
niftynet/network/simulator_gan.py
10,290
implementation of Hu et al., "Freehand Ultrasound Image Simulation with Spatially-Conditioned Generative Adversarial Networks", MICCAI RAMBO 2017 https://arxiv.org/abs/1707.05392 -*- coding: utf-8 -*- not passed in as a placeholder feature channels design pattern compute output n_feature_channels of i-th layer compute output spatial size of i-th layer last layer single channel image resizing utilities let the tensors flow... first layer fully-connected final conv without bn upsampling block w_init = tf.contrib.layers.variance_scaling_initializer() b_init = tf.constant_initializer(0) combining two flows with bn? let the tensors flow... first layer last layer
667
en
0.693371
# Desafio 42 - Aula 12 : Refazer Desasfio 35 e mostrar qual o tipo do triangulo. # A/ Equilatero. # B/ Isósceles. # C/ Escaleno. print('\033[32mATENÇÃO! VAMOS MONTAR UM TRIÂNGULO!!!\033[m') a = int(input('Digite a primeira medida: ')) b = int(input('Digite a segunda medida: ')) c = int(input('Digite a terceira medida: ')) if a < b + c and b < a + c and c < b + a: print('\033[34mTemos um triangulo!\033[m') if a==b==c: print('Este triângulo é \033[32mEQUILATERO\033[m! Pois possui todos os lados iguais.') elif a==b or a==c or b==c: print('Neste caso este triângulo possui dois lados iguais, portanto é \033[32mISÓSCELES\033[m!') else: print('Todos os lados são diferentes portante temos um triângulo \033[32mESCALENO\033[m!') else: print('\033[31mNão temos um triângulo!\033[m')
desafios/Mundo 2/Ex042.py
842
Desafio 42 - Aula 12 : Refazer Desasfio 35 e mostrar qual o tipo do triangulo. A/ Equilatero. B/ Isósceles. C/ Escaleno.
121
pt
0.742032
""" pyAutoSpec Spectral learning for WFA/MPS """ from .wfa import Wfa, SpectralLearning from .mps import Mps from .plots import parallel_plot from .function_wfa import FunctionWfa from .function_mps import FunctionMps from .dataset_mps import DatasetMps from .image_wfa import ImageWfa __all__ = ["Wfa", "Mps", "parallel_plot", "SpectralLearning", "FunctionWfa", "FunctionMps", "DatasetMps", "ImageWfa"]
pyautospec/__init__.py
407
pyAutoSpec Spectral learning for WFA/MPS
41
en
0.719463
# -*- coding: utf-8 -*- import sys from os.path import dirname, abspath, normpath, join, realpath from os import listdir, remove, system import json from datetime import datetime begin = len(normpath(abspath(join(dirname(__file__), "../..")))) end = len(normpath(abspath(join(dirname(__file__), "..")))) MAIN_DIR = dirname(realpath(__file__)) package_name = MAIN_DIR[begin + 1 : end] # Add the directory to the python path sys.path.append(MAIN_DIR[:begin]) exec( "from " + package_name + ".Generator.ClassGenerator.class_generator import generate_class" ) exec("from " + package_name + ".Generator.read_fct import read_all") exec("from " + package_name + ".definitions import MAIN_DIR, DOC_DIR, INT_DIR") # List of the main packages (to sort the classes) PACKAGE_LIST = ["Geometry", "Machine", "Material", "Slot", "Import"] def generate_code(root_path, gen_dict=None): """Generate pyleecan Classes code according to doc in root_path Parameters ---------- root_path : str Path to the main folder of Pyleecan gen_dict : dict Generation dictionnary (contains all the csv data) Returns ------- None """ CLASS_DIR = join(root_path, "Classes") FUNC_DIR = join(root_path, "Functions") DOC_DIR = join(root_path, "Generator", "ClassesRef") print("Reading classes csv in: " + DOC_DIR) print("Saving generated files in: " + CLASS_DIR) path = __file__[__file__.index(package_name) :] path = path.replace("\\", "/") # Deleting all the previous class print("Deleting old class files...") for file_name in listdir(CLASS_DIR): if file_name[0] != "_": remove(join(CLASS_DIR, file_name)) # A file to import every classes quickly import_file = open(join(CLASS_DIR, "import_all.py"), "w") import_file.write("# -*- coding: utf-8 -*-\n\n") import_file.write('"""File generated by generate_code() - \n') import_file.write('WARNING! 
All changes made in this file will be lost!\n"""\n\n') # A file to select the constructor according to a string load_file = open(join(FUNC_DIR, "load_switch.py"), "w") load_file.write("# -*- coding: utf-8 -*-\n") load_file.write('"""File generated by generate_code() - \n') load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n') load_file.write("from ..Classes.import_all import *\n\n") load_file.write("load_switch = {\n") # Read all the csv files if gen_dict is None: gen_dict = read_all(DOC_DIR) # Generate all the class files (sorted to remove "commit noise") for class_name, _ in iter(sorted(list(gen_dict.items()))): import_file.write( "from ..Classes." + class_name + " import " + class_name + "\n" ) load_file.write(' "' + class_name + '": ' + class_name + ",\n") print("Generation of " + class_name + " class") generate_class(gen_dict, class_name, CLASS_DIR) import_file.close() load_file.write("}\n") load_file.close() print("Generation of load_switch.py") print("Generation of import_all.py") # Save gen_dict class_dict_file = join(CLASS_DIR, "Class_Dict.json") with open(class_dict_file, "w") as json_file: json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(",", ": ")) if __name__ == "__main__": gen_dict = read_all(DOC_DIR, is_internal=False, in_path=INT_DIR) generate_code(MAIN_DIR, gen_dict) # Run black try: import black system('"{}" -m black .'.format(sys.executable)) if black.__version__.split(".")[0] != "20": print("\n############################################") print( "WARNING: The official version of black for pyleecan is 20, please update your black version" ) print("############################################\n") except ImportError: print("/!\\ Please install and run black (version 20) /!\\") now = datetime.now() print("End at: ", now.strftime("%H:%M:%S"))
pyleecan/Generator/run_generate_classes.py
4,082
Generate pyleecan Classes code according to doc in root_path Parameters ---------- root_path : str Path to the main folder of Pyleecan gen_dict : dict Generation dictionnary (contains all the csv data) Returns ------- None -*- coding: utf-8 -*- Add the directory to the python path List of the main packages (to sort the classes) Deleting all the previous class A file to import every classes quickly A file to select the constructor according to a string Read all the csv files Generate all the class files (sorted to remove "commit noise") Save gen_dict Run black
576
en
0.649802
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Single NN @author: xuping """ import numpy as np import scipy.io #from threeNN import sigmoid def layer_sizes(X, Y): n_in = X.shape[0] n_out = Y.shape[0] return(n_in, n_out) def initialize_parameters(dim): np.random.seed(3) W = np.random.randn(dim, dim)*0.01 b = np.zeros((dim, 1)) return W,b def prop(W,b,X,Y,lambd): m = X.shape[1] #forward A = sigmoid(np.dot(W, X) + b) cost = 1./m*np.sum(np.sum(np.square(A-Y),axis=0,keepdims=True)) + lambd/(2*m)*np.sum(np.sum(W*W)) #cost = 1./m*np.sum(np.sum(np.square(A-Y))) #backward Z = np.dot(W, X) + b dZ = 2*(A-Y)*sigmoid(Z)*(1-sigmoid(Z)) dW = 1./m*np.dot(dZ, X.T) + lambd/m*W #dW = 1./m*np.dot(dZ, X.T) db = 1./m*np.sum(dZ,axis=1,keepdims=True) grads = {"dW":dW, "db":db} return grads, cost def nn_model(X,Y,num_iterations, lambd, learning_rate, print_cost=True): #np.random.seed(3) costs = [] W, b = initialize_parameters(X.shape[0]) for i in range(num_iterations): grads, cost = prop(W,b,X,Y,lambd) dW = grads["dW"] db = grads["db"] W = W-learning_rate*dW b = b-learning_rate*db if print_cost and i%1000==0: print("cost after iteration %i: %f" %(i, cost)) costs.append(cost) parameters={"W":W, "b":b} grads={"dW":dW, "db":db} return parameters, costs def predict(parameters, X): W=parameters["W"] b=parameters["b"] A = sigmoid(np.dot(W, X) + b) return A def load_data(): data=scipy.io.loadmat('U_Train.mat') X = data['ud'] Y10 = data['tauR10'] Y5 = data['tauR5'] Y6 = data['tauR6'] return X, Y5, Y6, Y10 if __name__ == "__main__": #load data X, Y5, Y6, Y10 = load_data() X5 = X[:5, :] X6 = X[:6, :] X10 = X[:10, :] num_iterations = 30000 lambd = 10 learning_rate = 3 """ X=X6 Y=Y6 np.random.seed(3) dim=X.shape[0] W = np.random.randn(dim, dim)*0.01 b = np.zeros((dim, 1)) Z = np.dot(W, X) + b A = sigmoid(Z) cost = A-Y #dZ = 2*(A-Y)*sigmoid(Z)*(1-sigmoid(Z)) #dW = 1/m*np.dot(dZ, X.T) #db = 1/m*np.sum(dZ,axis=1,keepdims=True) """ #parameters5, cost5 = nn_model(X5, Y5, num_iterations, lambd, 
learning_rate, print_cost=True) parameters6, cost6 = nn_model(X6, Y6, num_iterations, lambd, learning_rate, print_cost=True) #parameters10, cost10 = nn_model(X10, Y10, num_iterations, lambd, learning_rate, print_cost=True) #W5=parameters5["W"] #b5=parameters5["b"] W6=parameters6["W"] b6=parameters6["b"] #W10=parameters10["W"] #b10=parameters10["b"] #scipy.io.savemat('weights6.mat',{'W6':W6}) #scipy.io.savemat('bias.mat',{'b6':b6})
NN_buildingblock/SingleNN.py
2,887
Single NN @author: xuping !/usr/bin/env python2 -*- coding: utf-8 -*-from threeNN import sigmoidforwardcost = 1./m*np.sum(np.sum(np.square(A-Y)))backwarddW = 1./m*np.dot(dZ, X.T)np.random.seed(3)load dataparameters5, cost5 = nn_model(X5, Y5, num_iterations, lambd, learning_rate, print_cost=True)parameters10, cost10 = nn_model(X10, Y10, num_iterations, lambd, learning_rate, print_cost=True)W5=parameters5["W"]b5=parameters5["b"]W10=parameters10["W"]b10=parameters10["b"]scipy.io.savemat('weights6.mat',{'W6':W6})scipy.io.savemat('bias.mat',{'b6':b6})
554
en
0.312823
# the TestEnv environment is used to simply simulate the network from flow.envs import TestEnv # the Experiment class is used for running simulations from flow.core.experiment import Experiment # the base network class from flow.networks import Network from flow.envs.base import Env # all other imports are standard from flow.core.params import VehicleParams, SumoCarFollowingParams, SumoLaneChangeParams from flow.controllers import IDMController from flow.core.params import InFlows from flow.core.params import NetParams from flow.core.params import TrafficLightParams from flow.core.params import InitialConfig from flow.core.params import EnvParams from flow.controllers import IDMController, RLController, StaticLaneChanger from gym.spaces.box import Box import numpy as np import collections # create some default parameters parameters HORIZON = 3000 env_params = EnvParams( horizon=HORIZON, sims_per_step=1, warmup_steps=0, additional_params={ "max_accel": 3, "max_decel": -2, "target_velocity": 20, "lane_change_duration": 4, "num_rl": 5, }) initial_config = InitialConfig(edges_distribution=['highway_0']) vehicles = VehicleParams() vehicles.add( veh_id="human", acceleration_controller=(IDMController, { "noise": 0.2 }), # lane_change_controller=(StaticLaneChanger, {}), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", ), lane_change_params=SumoLaneChangeParams( lane_change_mode=1621, model="SL2015", lc_impatience="0.1", lc_time_to_impatience="1.0" )) vehicles.add( veh_id="rl", acceleration_controller=(RLController, {}), lane_change_controller=(StaticLaneChanger, {}), # routing_controller=(HighwayRouter, {}), car_following_params=SumoCarFollowingParams( speed_mode="obey_safe_speed", ), lane_change_params=SumoLaneChangeParams( lane_change_mode=256, model="SL2015", lc_impatience="0.1", lc_time_to_impatience="1.0" ), num_vehicles=0) from flow.core.params import SumoParams sim_params = SumoParams( sim_step=0.2, render=True, lateral_resolution=1.0, 
restart_instance=True, ) import os inflow = InFlows() inflow.add(veh_type="human", edge="WC", # depart_lane="best", depart_lane=1, arrivalLane=0, probability=0.1, depart_speed="random", ) inflow.add(veh_type="human", edge="WC", # depart_lane="best", depart_lane=0, arrivalLane=1, probability=0.1, depart_speed="random", ) inflow.add(veh_type="human", edge="EC", # depart_lane="best", # vehs_per_hour=2000, depart_lane=1, arrivalLane=0, probability=0.1, depart_speed="random", ) inflow.add(veh_type="human", edge="EC", # depart_lane="best", # vehs_per_hour=2000, depart_lane=0, arrivalLane=1, probability=0.1, depart_speed="random", ) inflow.add( veh_type="rl", edge="WC", vehs_per_hour=100, depart_lane="free", depart_speed=5) net_params = NetParams( template={ "net":"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedcrossing.net.xml", # features associated with the routes vehicles take "vtype": "/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedcrossing.add.xml", # 和下方specify_routes一致 "rou":"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/data/pedcrossing.rou.xml", "trip":"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedestrians.trip.xml" }, inflows=inflow, ) # specify the edges vehicles can originate on initial_config = InitialConfig( edges_distribution=["WC"] ) tl_logic = TrafficLightParams(baseline=False) phases = [{"duration": "100000", "state": "GGGGr"}, {"duration": "4", "state": "yyyyr"}, {"duration": "10", "state": "rrrrG"}, {"duration": "10", "state": "rrrrr"}] tl_logic.add("C", phases=phases, programID="custom", offset="0") # specify the routes for vehicles in the network class PedCrossing(Network): def specify_routes(self, net_params): return {'EC': ['EC', 'CW'], 'WC': ['WC', 'CE']} class MoveXYPedEnv(Env): def __init__(self, env_params, sim_params, network, simulator='traci'): super().__init__(env_params, sim_params, network, simulator) # 环境相关 self.activeRequest = False self.greenTimeSoFar = 0 # minimum green time for the 
vehicles self.MIN_GREEN_TIME = 15 # the first phase in tls plan. see 'pedcrossing.tll.xml' self.VEHICLE_GREEN_PHASE = 0 self.PEDESTRIAN_GREEN_PHASE = 2 # the id of the traffic light (there is only one). This is identical to the # id of the controlled intersection (by default) self.TLSID = 'C' # pedestrian edges at the controlled intersection self.WALKINGAREAS = [':C_w0', ':C_w1'] self.CROSSINGS = [':C_c0'] # Move xy相关 self.num_lanes = max(self.k.network.num_lanes(edge) for edge in self.k.network.get_edge_list()) self.visible = [] self.stuck = False # variables used to sort vehicles by their initial position plus # distance traveled self.prev_pos = dict() self.absolute_position = dict() # maximum number of controlled vehicles self.num_rl = env_params.additional_params["num_rl"] # queue of rl vehicles waiting to be controlled self.rl_queue = collections.deque() # names of the rl vehicles controlled at any step self.rl_veh = [] # used for visualization: the vehicles behind and after RL vehicles # (ie the observed vehicles) will have a different color self.leader = [] self.follower = [] @property def action_space(self): """See class definition.""" max_decel = self.env_params.additional_params["max_decel"] max_accel = self.env_params.additional_params["max_accel"] lb = [1, -0.2] * self.num_rl ub = [2, 0.2] * self.num_rl # print("num_rl_vehicles:", self.num_rl) return Box(np.array(lb), np.array(ub), dtype=np.float32) @property def observation_space(self): """See class definition.""" # print("observation sapce shape: ", 4 * self.num_rl * # self.num_lanes + self.num_rl) return Box( low=-1000, high=3000, shape=(4 * self.num_rl * self.num_lanes + 2 * self.num_rl, ), dtype=np.float32) def compute_reward(self, rl_actions, **kwargs): """See class definition.""" reward = 0 # rl 车辆向前,并惩罚停止 rl_velocity = np.array(self.k.vehicle.get_speed(self.rl_veh)) target_vel = self.env_params.additional_params['target_velocity'] max_cost = np.array([target_vel] * self.num_rl) max_cost = 
np.linalg.norm(max_cost) cost = rl_velocity - target_vel cost = np.linalg.norm(cost) # epsilon term (to deal with ZeroDivisionError exceptions) eps = np.finfo(np.float32).eps reward += max(max_cost - cost, 0) / (max_cost + eps) gain = 0.5 thresh = 0.3 penalize = len(rl_velocity[rl_velocity < thresh]) reward -= gain * penalize # punish excessive lane changes by reducing the reward by a set value # every time an rl car changes lanes (10% of max reward) for veh_id in self.rl_veh: if self.k.vehicle.get_last_lc(veh_id) == self.time_counter: reward -= 10 if self.stuck: reward -= 100 # print("reward: ", reward) return reward def _apply_rl_actions(self, actions): """See class definition.""" acceleration = actions[::2] direction = actions[1::2] # represents vehicles that are allowed to change lanes # non_lane_changing_veh = [] # non_lane_changing_veh = \ # [self.time_counter <= # self.env_params.additional_params["lane_change_duration"] # + self.k.vehicle.get_last_lc(veh_id) # for veh_id in self.rl_veh] # # vehicle that are not allowed to change have their directions set to 0 # print(non_lane_changing_veh) # direction[non_lane_changing_veh] = \ # np.array([0] * sum(non_lane_changing_veh)) for i, veh_id in enumerate(self.rl_veh): if self.time_counter <= self.env_params.additional_params["lane_change_duration"]\ + self.k.vehicle.get_last_lc(veh_id): direction[i] = 0 x, y = self.k.vehicle.kernel_api.vehicle.getPosition(veh_id) print(x, y) print("edgeID", self.k.vehicle.get_edge(veh_id)) print("lane", self.k.vehicle.get_lane(veh_id)) self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=veh_id, edgeID="highway_1", lane=1, x=x+acceleration[i], y=y+direction[i], keepRoute=2) for x in np.nditer(direction, op_flags=['readwrite']): if x > 0.7: x[...] = 1 elif x < -0.7: x[...] = -1 else: x[...] 
= 0 # print("actions:", actions) # print("veh id: ", self.rl_veh) # print("acceleration: ", acceleration) # print("direction", direction) # self.k.vehicle.apply_acceleration(self.rl_veh, acc=acceleration) # self.k.vehicle.apply_lane_change(self.rl_veh, direction=direction) def get_state(self): """See class definition.""" obs = [ 0 for _ in range(4 * self.num_rl * self.num_lanes + 2 * self.num_rl) ] # print("rl veh id: ", self.rl_veh) self.visible = [] self.update_veh_id() speeds = [] for i, rl_id in enumerate(self.rl_veh): # x, y = self.k.vehicle.kernel_api.vehicle.getPosition(rl_id) # print(x, y) # print("edgeID", self.k.vehicle.get_edge(rl_id)) # print("lane", self.k.vehicle.get_lane(rl_id)) # self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=[rl_id, rl_id], edgeID="highway_1", lane=1, x=600, y=134) # add the speed for the ego rl vehicle x = self.k.vehicle.get_x_by_id(rl_id) if x == -1001: continue speed = self.k.vehicle.get_speed(rl_id) obs[-2*i - 1] = speed speeds.append(speed) obs[-2*i - 2] = x # if rl_id not in self.k.vehicle.get_ids(): # print("not in:", rl_id) # self.additional_command() # normalizers max_length = self.k.network.length() max_speed = self.k.network.max_speed() # set to 1000 since the absence of a vehicle implies a large # headway headway = [1] * self.num_lanes tailway = [1] * self.num_lanes vel_in_front = [0] * self.num_lanes vel_behind = [0] * self.num_lanes lane_leaders = self.k.vehicle.get_lane_leaders(rl_id) lane_followers = self.k.vehicle.get_lane_followers(rl_id) lane_headways = self.k.vehicle.get_lane_headways(rl_id) lane_tailways = self.k.vehicle.get_lane_tailways(rl_id) headway[0:len(lane_headways)] = lane_headways tailway[0:len(lane_tailways)] = lane_tailways for j, lane_leader in enumerate(lane_leaders): if lane_leader != '': lane_headways[j] /= max_length vel_in_front[j] = self.k.vehicle.get_speed(lane_leader) \ / max_speed self.visible.extend([lane_leader]) for j, lane_follower in enumerate(lane_followers): if lane_follower != 
'': lane_headways[j] /= max_length vel_behind[j] = self.k.vehicle.get_speed(lane_follower) \ / max_speed self.visible.extend([lane_follower]) # add the headways, tailways, and speed for all lane leaders # and followers obs[4*self.num_lanes*i:4*self.num_lanes*(i+1)] = \ np.concatenate((headway, tailway, vel_in_front, vel_behind)) # if len(speeds) > 3: # self.stuck = True # for speed in speeds: # if speed != 0: # self.stuck = False obs = np.array(obs) # print("observation: ", obs) # print("observation shape: ", obs.shape) np.clip(obs, -1000, 3000, out=obs) return obs def additional_command(self): # 红绿灯相关 # decide wether there is a waiting pedestrian and switch if the green # phase for the vehicles exceeds its minimum duration if not self.activeRequest: self.activeRequest = self.checkWaitingPersons() if self.k.kernel_api.trafficlight.getPhase(self.TLSID) == self.VEHICLE_GREEN_PHASE: self.greenTimeSoFar += 1 if self.greenTimeSoFar > self.MIN_GREEN_TIME: # check whether someone has pushed the button if self.activeRequest: # switch to the next phase self.k.kernel_api.trafficlight.setPhase( self.TLSID, self.VEHICLE_GREEN_PHASE + 1) # reset state self.activeRequest = False # MOVE XY相关 # specify observed vehicles for veh_id in self.leader + self.follower: self.k.vehicle.set_observed(veh_id) # update the "absolute_position" variable for veh_id in self.k.vehicle.get_ids(): this_pos = self.k.vehicle.get_x_by_id(veh_id) if this_pos == -1001: # in case the vehicle isn't in the network self.absolute_position[veh_id] = -1001 else: change = this_pos - self.prev_pos.get(veh_id, this_pos) self.absolute_position[veh_id] = \ (self.absolute_position.get(veh_id, this_pos) + change) \ % self.k.network.length() self.prev_pos[veh_id] = this_pos return def update_veh_id(self): # add rl vehicles that just entered the network into the rl queue for veh_id in self.k.vehicle.get_rl_ids(): if veh_id not in list(self.rl_queue) + self.rl_veh: self.rl_queue.append(veh_id) # remove rl vehicles that 
exited the network for veh_id in list(self.rl_queue): if veh_id not in self.k.vehicle.get_rl_ids() or veh_id not in self.k.vehicle.get_ids(): self.rl_queue.remove(veh_id) for veh_id in self.rl_veh: if veh_id not in self.k.vehicle.get_rl_ids() or veh_id not in self.k.vehicle.get_ids(): # print("rm veh_id", veh_id) self.rl_veh.remove(veh_id) # fil up rl_veh until they are enough controlled vehicles while len(self.rl_queue) > 0 and len(self.rl_veh) < self.num_rl: rl_id = self.rl_queue.popleft() self.rl_veh.append(rl_id) # print("add rl_veh:", rl_id) # print("update_veh_id, self.rl_veh:", self.rl_veh) def checkWaitingPersons(self): """check whether a person has requested to cross the street""" # check both sides of the crossing for edge in self.WALKINGAREAS: peds = self.k.kernel_api.edge.getLastStepPersonIDs(edge) # check who is waiting at the crossing # we assume that pedestrians push the button upon # standing still for 1s for ped in peds: if (self.k.kernel_api.person.getWaitingTime(ped) == 1 and self.k.kernel_api.person.getNextEdge(ped) in self.CROSSINGS): numWaiting = self.k.kernel_api.trafficlight.getServedPersonCount(self.TLSID, self.PEDESTRIAN_GREEN_PHASE) print("%s: pedestrian %s pushes the button (waiting: %s)" % (self.k.kernel_api.simulation.getTime(), ped, numWaiting)) return True return False def step(self, rl_actions): """Advance the environment by one step. Assigns actions to autonomous and human-driven agents (i.e. vehicles, traffic lights, etc...). Actions that are not assigned are left to the control of the simulator. The actions are then used to advance the simulator by the number of time steps requested per environment step. Results from the simulations are processed through various classes, such as the Vehicle and TrafficLight kernels, to produce standardized methods for identifying specific network state features. Finally, results from the simulator are used to generate appropriate observations. 
Parameters ---------- rl_actions : array_like an list of actions provided by the rl algorithm Returns ------- observation : array_like agent's observation of the current environment reward : float amount of reward associated with the previous state/action pair done : bool indicates whether the episode has ended info : dict contains other diagnostic information from the previous action """ for _ in range(self.env_params.sims_per_step): self.time_counter += 1 self.step_counter += 1 # perform acceleration actions for controlled human-driven vehicles if len(self.k.vehicle.get_controlled_ids()) > 0: accel = [] for veh_id in self.k.vehicle.get_controlled_ids(): action = self.k.vehicle.get_acc_controller( veh_id).get_action(self) accel.append(action) self.k.vehicle.apply_acceleration( self.k.vehicle.get_controlled_ids(), accel) # perform lane change actions for controlled human-driven vehicles if len(self.k.vehicle.get_controlled_lc_ids()) > 0: direction = [] for veh_id in self.k.vehicle.get_controlled_lc_ids(): target_lane = self.k.vehicle.get_lane_changing_controller( veh_id).get_action(self) direction.append(target_lane) self.k.vehicle.apply_lane_change( self.k.vehicle.get_controlled_lc_ids(), direction=direction) # perform (optionally) routing actions for all vehicles in the # network, including RL and SUMO-controlled vehicles routing_ids = [] routing_actions = [] for veh_id in self.k.vehicle.get_ids(): if self.k.vehicle.get_routing_controller(veh_id) \ is not None: routing_ids.append(veh_id) route_contr = self.k.vehicle.get_routing_controller( veh_id) routing_actions.append(route_contr.choose_route(self)) self.k.vehicle.choose_routes(routing_ids, routing_actions) self.apply_rl_actions(rl_actions) self.additional_command() # advance the simulation in the simulator by one step self.k.simulation.simulation_step() # store new observations in the vehicles and traffic lights class self.k.update(reset=False) # update the colors of vehicles if self.sim_params.render: 
self.k.vehicle.update_vehicle_colors() # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() # stop collecting new simulation steps if there is a collision if crash: break # render a frame self.render() states = self.get_state() # collect information of the state of the network based on the # environment class used self.state = np.asarray(states).T # collect observation new state associated with action next_observation = np.copy(states) # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.warmup_steps + self.env_params.horizon) or self.stuck if done: print("done") if self.stuck: print("stuck") else: print("time up") # compute the info for each agent infos = {} # compute the reward if self.env_params.clip_actions: rl_clipped = self.clip_actions(rl_actions) reward = self.compute_reward(rl_clipped, fail=crash) else: reward = self.compute_reward(rl_actions, fail=crash) return next_observation, reward, done, infos def reset(self): """See parent class. This also includes updating the initial absolute position and previous position. """ self.rl_queue.clear() self.rl_veh.clear() obs = super().reset() print("reset") for veh_id in self.k.vehicle.get_ids(): self.absolute_position[veh_id] = self.k.vehicle.get_x_by_id(veh_id) self.prev_pos[veh_id] = self.k.vehicle.get_x_by_id(veh_id) self.leader = [] self.follower = [] return obs if __name__ == "__main__": flow_params = dict( exp_tag='template', env_name=MoveXYPedEnv, network=PedCrossing, simulator='traci', sim=sim_params, env=env_params, net=net_params, veh=vehicles, initial=initial_config, tls=tl_logic, ) # number of time steps flow_params['env'].horizon = 10000 exp = Experiment(flow_params) # run the sumo simulation _ = exp.run(1)
traci_pedestrian_crossing/movexy_ped.py
22,992
See class definition. See class definition. check whether a person has requested to cross the street See class definition. See class definition. See class definition. See parent class. This also includes updating the initial absolute position and previous position. Advance the environment by one step. Assigns actions to autonomous and human-driven agents (i.e. vehicles, traffic lights, etc...). Actions that are not assigned are left to the control of the simulator. The actions are then used to advance the simulator by the number of time steps requested per environment step. Results from the simulations are processed through various classes, such as the Vehicle and TrafficLight kernels, to produce standardized methods for identifying specific network state features. Finally, results from the simulator are used to generate appropriate observations. Parameters ---------- rl_actions : array_like an list of actions provided by the rl algorithm Returns ------- observation : array_like agent's observation of the current environment reward : float amount of reward associated with the previous state/action pair done : bool indicates whether the episode has ended info : dict contains other diagnostic information from the previous action the TestEnv environment is used to simply simulate the network the Experiment class is used for running simulations the base network class all other imports are standard create some default parameters parameters lane_change_controller=(StaticLaneChanger, {}), routing_controller=(HighwayRouter, {}), depart_lane="best", depart_lane="best", depart_lane="best", vehs_per_hour=2000, depart_lane="best", vehs_per_hour=2000, features associated with the routes vehicles take 和下方specify_routes一致 specify the edges vehicles can originate on specify the routes for vehicles in the network 环境相关 minimum green time for the vehicles the first phase in tls plan. see 'pedcrossing.tll.xml' the id of the traffic light (there is only one). 
This is identical to the id of the controlled intersection (by default) pedestrian edges at the controlled intersection Move xy相关 variables used to sort vehicles by their initial position plus distance traveled maximum number of controlled vehicles queue of rl vehicles waiting to be controlled names of the rl vehicles controlled at any step used for visualization: the vehicles behind and after RL vehicles (ie the observed vehicles) will have a different color print("num_rl_vehicles:", self.num_rl) print("observation sapce shape: ", 4 * self.num_rl * self.num_lanes + self.num_rl) rl 车辆向前,并惩罚停止 epsilon term (to deal with ZeroDivisionError exceptions) punish excessive lane changes by reducing the reward by a set value every time an rl car changes lanes (10% of max reward) print("reward: ", reward) represents vehicles that are allowed to change lanes non_lane_changing_veh = [] non_lane_changing_veh = \ [self.time_counter <= self.env_params.additional_params["lane_change_duration"] + self.k.vehicle.get_last_lc(veh_id) for veh_id in self.rl_veh] vehicle that are not allowed to change have their directions set to 0 print(non_lane_changing_veh) direction[non_lane_changing_veh] = \ np.array([0] * sum(non_lane_changing_veh)) print("actions:", actions) print("veh id: ", self.rl_veh) print("acceleration: ", acceleration) print("direction", direction) self.k.vehicle.apply_acceleration(self.rl_veh, acc=acceleration) self.k.vehicle.apply_lane_change(self.rl_veh, direction=direction) print("rl veh id: ", self.rl_veh) x, y = self.k.vehicle.kernel_api.vehicle.getPosition(rl_id) print(x, y) print("edgeID", self.k.vehicle.get_edge(rl_id)) print("lane", self.k.vehicle.get_lane(rl_id)) self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=[rl_id, rl_id], edgeID="highway_1", lane=1, x=600, y=134) add the speed for the ego rl vehicle if rl_id not in self.k.vehicle.get_ids(): print("not in:", rl_id) self.additional_command() normalizers set to 1000 since the absence of a vehicle implies a large 
headway add the headways, tailways, and speed for all lane leaders and followers if len(speeds) > 3: self.stuck = True for speed in speeds: if speed != 0: self.stuck = False print("observation: ", obs) print("observation shape: ", obs.shape) 红绿灯相关 decide wether there is a waiting pedestrian and switch if the green phase for the vehicles exceeds its minimum duration check whether someone has pushed the button switch to the next phase reset state MOVE XY相关 specify observed vehicles update the "absolute_position" variable in case the vehicle isn't in the network add rl vehicles that just entered the network into the rl queue remove rl vehicles that exited the network print("rm veh_id", veh_id) fil up rl_veh until they are enough controlled vehicles print("add rl_veh:", rl_id) print("update_veh_id, self.rl_veh:", self.rl_veh) check both sides of the crossing check who is waiting at the crossing we assume that pedestrians push the button upon standing still for 1s perform acceleration actions for controlled human-driven vehicles perform lane change actions for controlled human-driven vehicles perform (optionally) routing actions for all vehicles in the network, including RL and SUMO-controlled vehicles advance the simulation in the simulator by one step store new observations in the vehicles and traffic lights class update the colors of vehicles crash encodes whether the simulator experienced a collision stop collecting new simulation steps if there is a collision render a frame collect information of the state of the network based on the environment class used collect observation new state associated with action test if the environment should terminate due to a collision or the time horizon being met compute the info for each agent compute the reward number of time steps run the sumo simulation
5,896
en
0.800143
# -*- coding: utf-8 -*- """ Created on Fri Apr 24 14:52:03 2020 @author: DELL """ import pandas as pd data = pd.read_csv('http://187.191.75.115/gobmx/salud/datos_abiertos/datos_abiertos_covid19.zip', encoding = 'ANSI') res = data[data['ENTIDAD_RES'] == 31] res.to_csv('data_yuc_actualizado.csv', index = False)
datos_yuc_actualizado.py
317
Created on Fri Apr 24 14:52:03 2020 @author: DELL -*- coding: utf-8 -*-
74
en
0.864873
#!/usr/bin/env python import unittest from ct.proto import client_pb2 from ct.proto import test_message_pb2 from ct.serialization import tls_message valid_test_message = test_message_pb2.TestMessage() valid_test_message.uint_8 = 0 valid_test_message.uint_16 = 258 valid_test_message.uint_24 = 197637 valid_test_message.uint_32 = 101124105 valid_test_message.uint_48 = 11042563100175 valid_test_message.uint_64 = 255 valid_test_message.fixed_bytes = "\xff\x00" valid_test_message.var_bytes = "hello" valid_test_message.var_bytes2 = "world" valid_test_message.vector_bytes.append("hello") valid_test_message.vector_bytes.append("world") valid_test_message.vector_uint32.append(1) valid_test_message.vector_uint32.append(255) valid_test_message.test_enum = test_message_pb2.TestMessage.ENUM_1 valid_test_message.select_uint32 = 2 valid_test_message.embedded_message.uint_32 = 3 valid_test_message.repeated_message.add().uint_32 = 4 valid_test_message.repeated_message.add().uint_32 = 256 # Test vectors are given as a list of serialized, hex-encoded components. 
serialized_valid_test_message = [ "00", # 0: uint_8 "0102", # 1: uint_16 "030405", # 2: uint_24 "06070809", # 3: uint_32 "0a0b0c0d0e0f", # 4: uint_48 "00000000000000ff", # 5: uint_64 "ff00", # 6: fixed_bytes "05" + "hello".encode("hex"), # 7: var_bytes "0005" + "world".encode("hex"), # 8: var_bytes2 "0c" + "05" + "hello".encode("hex") + "05" + "world".encode("hex"), # 9: vector_bytes "0800000001000000ff", # 10: vector_uint32 "0001", # 11: test_enum "00000002", # 12: select_uint32 "0003", # 13: embedded_message.uint_32 "0400040100", # 14: repeated_message ] class TLSReaderTest(unittest.TestCase): def verify_decode(self, test_vector, test_message): serialized = "".join(test_vector).decode("hex") message = test_message_pb2.TestMessage() tls_message.decode(serialized, message) self.assertEqual(test_message, message, msg = "%s vs %s" % (test_message, message)) def verify_decode_fail(self, test_vector): serialized = "".join(test_vector).decode("hex") message = test_message_pb2.TestMessage() self.assertRaises(tls_message.TLSDecodingError, tls_message.decode, serialized, message) def test_decode_valid(self): self.verify_decode(serialized_valid_test_message, valid_test_message) pass def test_decode_valid_select(self): test_vector = serialized_valid_test_message[:] test_vector[11] = "0000" test_vector[12] = "" test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.test_enum = test_message_pb2.TestMessage.ENUM_0 test_message.ClearField("select_uint32") self.verify_decode(test_vector, test_message) def test_decode_invalid_select_fails(self): test_vector = serialized_valid_test_message[:] test_vector[11] = "0000" self.verify_decode_fail(test_vector) def test_decode_too_short_fails(self): test_vector = serialized_valid_test_message[:] # var_bytes2 has a min length of 4 test_vector[8] = "bit".encode("hex") self.verify_decode_fail(test_vector) def test_decode_empty(self): test_vector = serialized_valid_test_message[:] # var_bytes has 
no min length test_vector[7] = "00" test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.var_bytes = "" self.verify_decode(test_vector, test_message) def test_decode_too_long_fails(self): test_vector = serialized_valid_test_message[:] # var_bytes has a max length of 16 test_vector[7] = "16" + "Iamtoolongformyowngood".encode("hex") self.verify_decode_fail(test_vector) def test_decode_repeated_too_short_fails(self): test_vector = serialized_valid_test_message[:] # repeated_uint32 has a min total length of 4 test_vector[10] = "00" self.verify_decode_fail(test_vector) def test_decode_repeated_too_long_fails(self): test_vector = serialized_valid_test_message[:] # repeated_uint32 has a max total length of 8 test_vector[10] = "0c" + "00"*12 self.verify_decode_fail(test_vector) def test_decode_repeated_invalid_contents_fails(self): test_vector = serialized_valid_test_message[:] # repeated_uint32 must be a multiple of 4 test_vector[10] = "02" + "0000" self.verify_decode_fail(test_vector) def test_read_longer_buffer(self): test_vector = serialized_valid_test_message[:] test_vector.append("somegarbageintheend".encode("hex")) serialized = "".join(test_vector).decode("hex") message = test_message_pb2.TestMessage() reader = tls_message.TLSReader(serialized) reader.read(message) self.assertEqual(valid_test_message, message, msg = "%s vs %s" % (valid_test_message, message)) self.assertFalse(reader.finished()) class TLSWriterTest(unittest.TestCase): def verify_encode(self, test_message, test_vector): serialized = tls_message.encode(test_message) self.assertEqual("".join(test_vector), serialized.encode("hex")) def verify_encode_fails(self, test_message): self.assertRaises(tls_message.TLSEncodingError, tls_message.encode, test_message) def test_encode(self): self.verify_encode(valid_test_message, serialized_valid_test_message) def test_encode_ignores_skipped_fields(self): test_message = test_message_pb2.TestMessage() 
test_message.CopyFrom(valid_test_message) test_message.skip_uint32 = 42 self.verify_encode(test_message, serialized_valid_test_message) def test_encode_ignores_bad_select(self): test_vector = serialized_valid_test_message[:] test_vector[11] = "0000" test_vector[12] = "" test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.test_enum = test_message_pb2.TestMessage.ENUM_0 self.verify_encode(test_message, test_vector) def test_encode_too_large_value_fails(self): test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.uint_8 = 65000 self.verify_encode_fails(test_message) def test_encode_bad_length_fails(self): test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.fixed_bytes = "hello" self.verify_encode_fails(test_message) def test_encode_too_short_fails(self): test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.var_bytes2 = "sho" self.verify_encode_fails(test_message) def test_encode_too_long_fails(self): test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.var_bytes = "Iamtoolongformyowngood" self.verify_encode_fails(test_message) def test_encode_repeated_too_long_fails(self): test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.vector_uint32.extend([1, 2, 3, 4]) self.verify_encode_fails(test_message) def test_encode_repeated_too_short_fails(self): test_message = test_message_pb2.TestMessage() test_message.CopyFrom(valid_test_message) test_message.ClearField("vector_uint32") self.verify_encode_fails(test_message) class SCTEncodingTest(unittest.TestCase): def setUp(self): sct_proto = client_pb2.SignedCertificateTimestamp() sct_proto.version = client_pb2.V1 sct_proto.id.key_id = ( "a4b90990b418581487bb13a2cc67700a3c359804f91bdfb8e377cd0ec80ddc10" ).decode('hex') sct_proto.timestamp = 
1365427532443 sct_proto.signature.hash_algorithm = client_pb2.DigitallySigned.SHA256 sct_proto.signature.sig_algorithm = client_pb2.DigitallySigned.ECDSA sct_proto.signature.signature = ( "304502210089de897f603e590b1aa0d7c4236c2f697e90602795f7a469215fda5e" "460123fc022065ab501ce3dbaf49bd563d1c9ff0ac76120bc11f65a44122b3cd8b" "89fc77a48c").decode("hex") self._sct_proto = sct_proto def test_correctly_encodes_sct(self): sct = tls_message.encode(self._sct_proto) expected_sct = ("00a4b90990b418581487bb13a2cc67700a3c359804f91bdfb8e377" "cd0ec80ddc100000013de9d2b29b000004030047304502210089de" "897f603e590b1aa0d7c4236c2f697e90602795f7a469215fda5e46" "0123fc022065ab501ce3dbaf49bd563d1c9ff0ac76120bc11f65a4" "4122b3cd8b89fc77a48c").decode("hex") self.assertEqual(sct, expected_sct) def test_correctly_encodes_sct_list_one_sct(self): # Taken from the C++ serializer test, to ensure this encoder # produces results compatible with the C++ one. single_sct = ("0069616d617075626c69636b657973686174776f6669766573697864" "696765737400000000000004d20000040300097369676e6174757265" ).decode("hex") sct_list = client_pb2.SignedCertificateTimestampList() sct_list.sct_list.append(single_sct) encoded_sct_list = tls_message.encode(sct_list) self.assertEqual(encoded_sct_list[:4], "003a0038".decode("hex")) self.assertEqual(encoded_sct_list[4:], single_sct) def test_correctly_encodes_sct_list_multiple_scts(self): first_sct = tls_message.encode(self._sct_proto) sct_proto_2 = client_pb2.SignedCertificateTimestamp() sct_proto_2.CopyFrom(self._sct_proto) sct_proto_2.timestamp = 1365427530000 second_sct = tls_message.encode(sct_proto_2) sct_list = client_pb2.SignedCertificateTimestampList() sct_list.sct_list.extend([first_sct, second_sct]) encoded_sct_list = tls_message.encode(sct_list) # First 2 bytes are list length prefix - 240 bytes in total # Next 2 bytes are the length of the first SCT: 118 self.assertEqual(encoded_sct_list[:4], "00f00076".decode("hex")) first_sct_end = len(first_sct) + 4 # The 
actual SCT self.assertEqual(encoded_sct_list[4:first_sct_end], first_sct) # Next 2 bytes are the length of the second SCT (118 again) self.assertEqual(encoded_sct_list[first_sct_end:first_sct_end+2], "0076".decode("hex")) # The 2nd SCT self.assertEqual(encoded_sct_list[first_sct_end+2:], second_sct) if __name__ == "__main__": unittest.main()
vendor/github.com/google/certificate-transparency/python/ct/serialization/tls_message_test.py
10,987
!/usr/bin/env python Test vectors are given as a list of serialized, hex-encoded components. 0: uint_8 1: uint_16 2: uint_24 3: uint_32 4: uint_48 5: uint_64 6: fixed_bytes 7: var_bytes 8: var_bytes2 9: vector_bytes 10: vector_uint32 11: test_enum 12: select_uint32 13: embedded_message.uint_32 14: repeated_message var_bytes2 has a min length of 4 var_bytes has no min length var_bytes has a max length of 16 repeated_uint32 has a min total length of 4 repeated_uint32 has a max total length of 8 repeated_uint32 must be a multiple of 4 Taken from the C++ serializer test, to ensure this encoder produces results compatible with the C++ one. First 2 bytes are list length prefix - 240 bytes in total Next 2 bytes are the length of the first SCT: 118 The actual SCT Next 2 bytes are the length of the second SCT (118 again) The 2nd SCT
835
en
0.797192
# coding: utf-8 import codecs import re import json from budget2013_common import * class Budget2013_37_SubTable1Item(object): def __init__(self): self._no = None self._purpose = None self._principal = None self._value = None self._regress = None self._check = None self._other = [] @property def no(self): return self._no @no.setter def no(self, value): self._no = value @property def purpose(self): return self._purpose @purpose.setter def purpose(self, value): self._purpose = value @property def principal(self): return self._principal @principal.setter def principal(self, value): self._principal = value @property def value(self): return self._value @value.setter def value(self, value): self._value = value @property def regress(self): return self._regress @regress.setter def regress(self, value): self._regress = value @property def check(self): return self._check @check.setter def check(self, value): self._check = value @property def other(self): return self._other @other.setter def other(self, value): self._other = value class JsonEncoder_Budget2013_37_SubTable1Item(json.JSONEncoder): def default(self, o): return { "no": o.no, "purpose": o.purpose, "principal": o.principal, "value": o.value, "regress": o.regress, "check": o.check, "other": o.other } class Budget2013_37_SubTable1(object): def __init__(self): self._caption = None self._headers = [] self._items = [] self._notes = [] @property def caption(self): return self._caption @caption.setter def caption(self, value): self._caption = value @property def headers(self): return self._headers @headers.setter def headers(self, value): self._headers = value @property def items(self): return self._items @items.setter def items(self, value): self._items = value @property def notes(self): return self._notes @notes.setter def notes(self, value): self._notes = value class JsonEncoder_Budget2013_37_SubTable1(json.JSONEncoder): def default(self, o): item_encoder = JsonEncoder_Budget2013_37_SubTable1Item() return { "caption": 
o.caption, "headers": o.headers, "items": [item_encoder.default(item) for item in o.items], "notes": o.notes } class Budget2013_37_SubTable2(object): def __init__(self): self._caption = None self._headers = [] self._items = [] @property def caption(self): return self._caption @caption.setter def caption(self, value): self._caption = value @property def headers(self): return self._headers @headers.setter def headers(self, value): self._headers = value @property def items(self): return self._items @items.setter def items(self, value): self._items = value class JsonEncoder_Budget2013_37_SubTable2Item(json.JSONEncoder): def default(self, o): return { "name": o["name"], "value": o["value"] } class JsonEncoder_Budget2013_37_SubTable2(json.JSONEncoder): def default(self, o): item_encoder = JsonEncoder_Budget2013_37_SubTable2Item() return { "caption": o.caption, "headers": o.headers, "items": [item_encoder.default(item) for item in o.items] } class Budget2013_37(object): def __init__(self): self._caption = None self._subtable1 = Budget2013_37_SubTable1() self._subtable2 = Budget2013_37_SubTable2() @property def caption(self): return self._caption @caption.setter def caption(self, value): self._caption = value @property def subtable1(self): return self._subtable1 @property def subtable2(self): return self._subtable2 class JsonEncoder_Budget2013_37(json.JSONEncoder): def default(self, o): subtable1_encoder = JsonEncoder_Budget2013_37_SubTable1() subtable2_encoder = JsonEncoder_Budget2013_37_SubTable2() return { "caption": o.caption, "subtable1": subtable1_encoder.default(o.subtable1), "subtable2": subtable2_encoder.default(o.subtable2) } def check_document(document): total_value = 0.0 for item in document.subtable1.items[:-1]: total_value += item.value if total_value != document.subtable1.items[-1].value: print total_value, document.subtable1.items[-1].value raise Exception(u"Сумма не сходится.") def get_document(input_file_name): with codecs.open(input_file_name, "r", 
encoding = "utf-8-sig") as input_file: input_data = input_file.readlines() document = Budget2013_37() line_index = 0 # caption caption_lines = [] while line_index < len(input_data): caption_line = input_data[line_index].strip() line_index += 1 if not caption_line: break caption_lines.append(caption_line) document.caption = join_lines(caption_lines) # subtable1 caption caption_lines = [] while line_index < len(input_data): caption_line = input_data[line_index].strip() line_index += 1 if not caption_line: break caption_lines.append(caption_line) document.subtable1.caption = join_lines(caption_lines) # subtable1 headers headers = input_data[line_index].strip() line_index += 2 document.subtable1.headers = headers.split(";") # subtable1 data while not input_data[line_index].strip().startswith(u"ИТОГО"): item = Budget2013_37_SubTable1Item() # no + purpose purpose_lines = [] while line_index < len(input_data): purpose_line = input_data[line_index].strip() line_index += 1 if not purpose_line: break purpose_lines.append(purpose_line) purpose = join_lines(purpose_lines) m = re.compile(u"^(\\d+) (.*)").match(purpose) item.no = int(m.group(1)) item.purpose = m.group(2) # principal principal_lines = [] while line_index < len(input_data): principal_line = input_data[line_index].strip() line_index += 1 if not principal_line: break principal_lines.append(principal_line) item.principal = join_lines(principal_lines) # value item.value = float(input_data[line_index].strip().replace(",", ".").replace(" ", "")) line_index += 2 # regress s = input_data[line_index].strip() if s == u"Нет": item.regress = False elif s == u"Есть": item.regress = True else: print s raise Exception(u"Unknown regress: " + s) line_index += 2 # check s = input_data[line_index].strip() if s == u"Нет": item.check = False elif s == u"Есть": item.check = True else: print s raise Exception(u"Unknown check: " + s) line_index += 2 # other other_lines = [] while line_index < len(input_data): other_line = 
input_data[line_index].strip() line_index += 1 if not other_line: break if re.compile("^\\d+\\. ").match(other_line): if other_lines: o = join_lines(other_lines) item.other.append(o) other_lines = [] other_lines.append(other_line) if other_lines: o = join_lines(other_lines) item.other.append(o) other_lines = [] document.subtable1.items.append(item) # ИТОГО s = input_data[line_index].strip() m = re.compile(u"^(ИТОГО)\\*? (.*)").match(s) item = Budget2013_37_SubTable1Item() item.purpose = m.group(1) item.value = float(m.group(2).replace(",", ".").replace(" ", "")) document.subtable1.items.append(item) line_index += 2 # notes notes_lines = [] while line_index < len(input_data): notes_line = input_data[line_index].rstrip() line_index += 1 if not notes_line: break m = re.compile("^\\*? (.*)").match(notes_line) if m: if notes_lines: note = join_lines(notes_lines) document.subtable1.notes.append(note) notes_lines = [] notes_lines.append(m.group(1)) else: notes_lines.append(notes_line.lstrip()) if notes_lines: note = join_lines(notes_lines) document.subtable1.notes.append(note) notes_lines = [] line_index += 1 # subtable2 caption caption_lines = [] while line_index < len(input_data): caption_line = input_data[line_index].strip() line_index += 1 if not caption_line: break caption_lines.append(caption_line) document.subtable2.caption = join_lines(caption_lines) # subtable2 headers headers = input_data[line_index].strip() line_index += 1 document.subtable2.headers = headers.split(";") #subtable2 data while line_index < len(input_data): data_line = input_data[line_index].strip() line_index += 1 if not data_line: break m = re.compile("([\\d ,]+)$").search(data_line) value = float(m.group(1).replace(",", ".").replace(" ", "")) name = data_line[:len(data_line) - len(m.group(1)) - 1].strip() item = {"name": name, "value": value} document.subtable2.items.append(item) check_document(document) return document def do_write_text_document(output_file, document): 
output_file.write(document.caption + "\r\n\r\n") output_file.write(document.subtable1.caption + "\r\n\r\n") output_file.write(u" ".join(document.subtable1.headers) + "\r\n\r\n") for item in document.subtable1.items[:-1]: output_file.write(unicode(item.no) + " " + item.purpose + " " + item.principal + " " + unicode(item.value) + " " + unicode(item.regress) + " " + unicode(item.check) + "\r\n") if item.other: for o in item.other: output_file.write(o + "\r\n"); output_file.write("\r\n") output_file.write(document.subtable1.items[-1].purpose + " " + unicode(document.subtable1.items[-1].value) + "\r\n\r\n") for note in document.subtable1.notes: output_file.write(note + "\r\n") output_file.write("\r\n") output_file.write(document.subtable2.caption + "\r\n\r\n") output_file.write(u" ".join(document.subtable2.headers) + "\r\n\r\n") for item in document.subtable2.items: output_file.write(item["name"] + " " + unicode(item["value"]) + "\r\n") if __name__ == "__main__": parser = get_default_argument_parser() args = parser.parse_args() input_file_name = args.input_file_name output_pickle_file_name = args.output_pickle_file_name output_text_file_name = args.output_text_file_name output_json_file_name = args.output_json_file_name output_json_pretty_file_name = args.output_json_pretty_file_name if (not output_pickle_file_name) and (not output_text_file_name) and (not output_json_file_name) and (not output_json_pretty_file_name): raise Exception("No output file specified") document = get_document(input_file_name) if output_pickle_file_name: write_pickle_document(document, output_pickle_file_name) if output_text_file_name: write_text_document(document, output_text_file_name, do_write_text_document) if output_json_file_name: write_json_document(document, output_json_file_name, JsonEncoder_Budget2013_37) if output_json_pretty_file_name: write_json_pretty_document(document, output_json_pretty_file_name, JsonEncoder_Budget2013_37)
federal/2013/code/budget2013_37.py
10,833
coding: utf-8 caption subtable1 caption subtable1 headers subtable1 data no + purpose principal value regress check other ИТОГО notes subtable2 caption subtable2 headerssubtable2 data
183
en
0.124983
""" Baseline CNN, losss function and metrics Also customizes knowledge distillation (KD) loss function here """ import numpy as np import torch import torch.nn as nn import torch.nn.functional as F class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1) """ This is the standard way to define your own network in PyTorch. You typically choose the components (e.g. LSTMs, linear layers etc.) of your network in the __init__ function. You then apply these layers on the input step-by-step in the forward function. You can use torch.nn.functional to apply functions such as F.relu, F.sigmoid, F.softmax, F.max_pool2d. Be careful to ensure your dimensions are correct after each step. You are encouraged to have a look at the network in pytorch/nlp/model/net.py to get a better sense of how you can go about defining your own network. The documentation for all the various components available o you is here: http://pytorch.org/docs/master/nn.html """ class studentB(nn.Module): def __init__(self, params): """ We define an convolutional network that predicts the sign from an image. The components required are: Args: params: (Params) contains num_channels """ super(studentB, self).__init__() self.num_channels = params.num_channels # each of the convolution layers below have the arguments (input_channels, output_channels, filter_size, # stride, padding). We also include batch normalisation layers that help stabilise training. # For more details on how to use these layers, check out the documentation. 
self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2) self.bn1 = nn.BatchNorm2d(32) self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0) self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1) self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0) self.bn2 = nn.BatchNorm2d(64) self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0) self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1) self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0) self.bn3 = nn.BatchNorm2d(128) # 2 fully connected layers to transform the output of the convolution layers to the final output self.fc1 = nn.Linear(4*4*128, 500) self.fcbn1 = nn.BatchNorm1d(500) self.fc2 = nn.Linear(500, 10) self.dropout_rate = params.dropout_rate def forward(self, s): """ This function defines how we use the components of our network to operate on an input batch. Args: s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 . Returns: out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image. 
Note: the dimensions after each step are provided """ # -> batch_size x 3 x 32 x 32 # we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3 s = self.bn1(self.conv1(s)) # batch_size x 32 x 32 x 32 s = F.relu(F.max_pool2d(s, 2)) # batch_size x 32 x 16 x 16 s = self.conv2_1(s) s = self.conv2_2(s) s = self.conv2_3(s) s = self.bn2(s) # batch_size x 10 * 2 x 16 x 16 s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels*2 x 8 x 8 s = self.conv3_1(s) s = self.conv3_2(s) s = self.conv3_3(s) s = self.bn3(s) # batch_size x 10 * 2 x 16 x 16 s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels*2 x 8 x 8 # flatten the output for each image s = s.view(-1, 4*4*128) # batch_size x 4*4*num_channels*4 # apply 2 fully connected layers with dropout s = F.dropout(F.relu(self.fcbn1(self.fc1(s))), p=self.dropout_rate, training=self.training) # batch_size x self.num_channels*4 s = self.fc2(s) # batch_size x 10 return s def loss_fn(outputs, labels): """ Compute the cross entropy loss given outputs and labels. Args: outputs: (Variable) dimension batch_size x 6 - output of the model labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5] Returns: loss (Variable): cross entropy loss for all images in the batch Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example demonstrates how you can easily define a custom loss function. """ return nn.CrossEntropyLoss()(outputs, labels) def loss_fn_kd(outputs, labels, teacher_outputs, params): """ Compute the knowledge-distillation (KD) loss given outputs, labels. "Hyperparameters": temperature and alpha NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher and student expects the input tensor to be log probabilities! 
See Issue #2 """ alpha = params.alpha T = params.temperature KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1), F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \ F.cross_entropy(outputs, labels) * (1. - alpha) return KD_loss def accuracy(outputs, labels): """ Compute the accuracy, given the outputs and labels for all images. Args: outputs: (np.ndarray) output of the model labels: (np.ndarray) [0, 1, ..., num_classes-1] Returns: (float) accuracy in [0,1] """ outputs = np.argmax(outputs, axis=1) return np.sum(outputs==labels)/float(labels.size) # maintain all metrics required in this dictionary- these are used in the training and evaluation loops metrics = { 'accuracy': accuracy, # could add more metrics such as accuracy for each token type }
model/studentB.py
6,169
We define an convolutional network that predicts the sign from an image. The components required are: Args: params: (Params) contains num_channels Compute the accuracy, given the outputs and labels for all images. Args: outputs: (np.ndarray) output of the model labels: (np.ndarray) [0, 1, ..., num_classes-1] Returns: (float) accuracy in [0,1] This function defines how we use the components of our network to operate on an input batch. Args: s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 . Returns: out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image. Note: the dimensions after each step are provided Compute the cross entropy loss given outputs and labels. Args: outputs: (Variable) dimension batch_size x 6 - output of the model labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5] Returns: loss (Variable): cross entropy loss for all images in the batch Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example demonstrates how you can easily define a custom loss function. Compute the knowledge-distillation (KD) loss given outputs, labels. "Hyperparameters": temperature and alpha NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher and student expects the input tensor to be log probabilities! See Issue #2 Baseline CNN, losss function and metrics Also customizes knowledge distillation (KD) loss function here each of the convolution layers below have the arguments (input_channels, output_channels, filter_size, stride, padding). We also include batch normalisation layers that help stabilise training. For more details on how to use these layers, check out the documentation. 
2 fully connected layers to transform the output of the convolution layers to the final output -> batch_size x 3 x 32 x 32 we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3 batch_size x 32 x 32 x 32 batch_size x 32 x 16 x 16 batch_size x 10 * 2 x 16 x 16 batch_size x num_channels*2 x 8 x 8 batch_size x 10 * 2 x 16 x 16 batch_size x num_channels*2 x 8 x 8 flatten the output for each image batch_size x 4*4*num_channels*4 apply 2 fully connected layers with dropout batch_size x self.num_channels*4 batch_size x 10 maintain all metrics required in this dictionary- these are used in the training and evaluation loops could add more metrics such as accuracy for each token type
2,597
en
0.763691
from typing import TypedDict from cff.models.cloudfront_event import CloudFrontEvent class Record(TypedDict): """Record of an event that raised a Lambda event.""" cf: CloudFrontEvent """The CloudFront event that raised this Lambda event."""
cff/models/record.py
257
Record of an event that raised a Lambda event.
46
en
0.961025
""" Django settings for toDoList project. Generated by 'django-admin startproject' using Django 2.1.7. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'r=cr&4z(#t-&vbyp_71-sy&edioe73mt48%)1ur^g1&@p$m69e' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'apps.todo', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'toDoList.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'builtins': ['django.templatetags.static'] }, }, ] WSGI_APPLICATION = 'toDoList.wsgi.application' # Database # 
https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'things_to_do', 'USER': 'root', 'PASSWORD': '123456', 'PORT': 3306, 'HOST': '127.0.0.1' } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
toDoList/toDoList/settings.py
3,462
Django settings for toDoList project. Generated by 'django-admin startproject' using Django 2.1.7. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/2.1/ref/settings/databases Password validation https://docs.djangoproject.com/en/2.1/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/2.1/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/2.1/howto/static-files/
989
en
0.642836
from .spark_cluster import SparkCluster from staroid import Staroid import requests import os, stat, time from pathlib import Path class Ods: def __init__(self, staroid=None, ske=None, cache_dir=None): self.__ske = None if staroid == None: self._staroid = Staroid() else: self._staroid = staroid if cache_dir == None: self.__cache_dir = "{}/.ods".format(str(Path.home())) else: self.__cache_dir = cache_dir # configure from env var if "STAROID_SKE" in os.environ: self.__ske = os.environ["STAROID_SKE"] # configure from args if ske != None: self.__ske = ske def create_or_get_cache_dir(self, module = ""): "create (if not exists) or return cache dir path for module" cache_dir = "{}/{}".format(self.__cache_dir, module) if not os.path.exists(cache_dir): os.makedirs(cache_dir) return cache_dir def download_chisel_if_not_exists(self): self._staroid.get_chisel_path() def _start_instance_on_staroid(self, instance_name, commit_url): cluster = self._staroid.cluster().get(self.__ske) if cluster == None: raise Exception("Can't get ske cluster") ns_api = self._staroid.namespace(cluster) ns = ns_api.create(instance_name, commit_url) if ns == None: raise Exception("Can't create instance") # if instnace is stopped, restart if ns.status() == "PAUSE": ns_api.start(instance_name) # wait for phase to become RUNNING return self.__wait_for_ns_phase(ns_api, ns, "RUNNING", 600) def _start_tunnel(self, instance_name, tunnels): cluster = self._staroid.cluster().get(self.__ske) if cluster == None: raise Exception("Can't get ske cluster") ns_api = self._staroid.namespace(cluster) ns = ns_api.get(instance_name) ns_api.shell_start(instance_name) ns_api.start_tunnel(instance_name, tunnels) def _stop_tunnel(self, instance_name): cluster = self._staroid.cluster().get(self.__ske) if cluster == None: raise Exception("Can't get ske cluster") ns_api = self._staroid.namespace(cluster) ns_api.stop_tunnel(instance_name) ns_api.shell_stop(instance_name) def _stop_instance_on_staroid(self, instance_name): 
cluster = self._staroid.cluster().get(self.__ske) if cluster == None: raise Exception("Can't get ske cluster") ns_api = self._staroid.namespace(cluster) ns = ns_api.stop(instance_name) ns = self.__wait_for_ns_phase(ns_api, ns, "PAUSED", 600) return ns def _delete_instance_on_staroid(self, instance_name): cluster = self._staroid.cluster().get(self.__ske) if cluster == None: raise Exception("Can't get ske cluster") ns_api = self._staroid.namespace(cluster) ns = ns_api.delete(instance_name) ns = self.__wait_for_ns_phase(ns_api, ns, "REMOVED", 600) def __wait_for_ns_phase(self, ns_api, ns, phase, timeout): start_time = time.time() sleep_time = 1 max_sleep_time = 7 while ns.phase() != phase: if time.time() - start_time > timeout: raise Exception("Timeout") # sleep time.sleep(sleep_time) if sleep_time < max_sleep_time: sleep_time += 1 # check ns = ns_api.get_by_id(ns.id()) return ns __singleton = {} def init(ske=None, reinit=True): if "instance" not in __singleton or reinit: __singleton["instance"] = Ods(ske=ske) return __singleton["instance"] def spark( name, spark_conf=None, spark_version="3.0.1", spark_home=None, worker_num=1, worker_type="standard-4", worker_isolation="dedicated", delta=False, aws=True): init(reinit=False) cluster = SparkCluster( __singleton["instance"], name, spark_conf=spark_conf, spark_version=spark_version, spark_home=spark_home, worker_num=worker_num, worker_type=worker_type, worker_isolation=worker_isolation, delta=delta, aws=aws) return cluster
ods/ods.py
4,416
create (if not exists) or return cache dir path for module configure from env var configure from args if instnace is stopped, restart wait for phase to become RUNNING sleep check
180
en
0.623735
# -*- coding: utf-8 -*- # Copyright 2020 The PsiZ Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Example that infers a shared embedding for three groups. Fake data is generated from a ground truth model for three different groups. In this example, these groups represent groups of agents with varying levels of skill: novices, intermediates, and experts. Each group has a different set of attention weights. An embedding model is inferred from the simulated data and compared to the ground truth model. Example output: Attention weights: Novice | [3.38 3.32 0.49 0.43] Intermediate | [2.06 2.18 2.04 2.18] Expert | [0.55 0.50 3.40 3.32] Model Comparison (R^2) ================================ True | Inferred | Novice Interm Expert --------+----------------------- Novice | 0.95 0.68 0.16 Interm | 0.64 0.96 0.54 Expert | 0.16 0.61 0.96 """ import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # noqa import numpy as np from scipy.stats import pearsonr import tensorflow as tf import psiz # Uncomment the following line to force eager execution. # tf.config.run_functions_eagerly(True) # Uncomment and edit the following to control GPU visibility. # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # os.environ["CUDA_VISIBLE_DEVICES"] = "0" def main(): """Run the simulation that infers an embedding for three groups.""" # Settings. 
n_stimuli = 30 n_dim = 4 n_group = 3 n_restart = 1 epochs = 1000 n_trial = 2000 batch_size = 128 model_true = ground_truth(n_stimuli, n_dim, n_group) # Generate a random docket of trials to show each group. generator = psiz.trials.RandomRank( n_stimuli, n_reference=8, n_select=2 ) docket = generator.generate(n_trial) # Create virtual agents for each group. agent_novice = psiz.agents.RankAgent(model_true, groups=[0]) agent_interm = psiz.agents.RankAgent(model_true, groups=[1]) agent_expert = psiz.agents.RankAgent(model_true, groups=[2]) # Simulate similarity judgments for each group. obs_novice = agent_novice.simulate(docket) obs_interm = agent_interm.simulate(docket) obs_expert = agent_expert.simulate(docket) obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert)) # Partition observations into 80% train, 10% validation and 10% test set. obs_train, obs_val, obs_test = psiz.utils.standard_split(obs) # Convert to TF dataset. ds_obs_train = obs_train.as_dataset().shuffle( buffer_size=obs_train.n_trial, reshuffle_each_iteration=True ).batch(batch_size, drop_remainder=False) ds_obs_val = obs_val.as_dataset().batch( batch_size, drop_remainder=False ) ds_obs_test = obs_test.as_dataset().batch( batch_size, drop_remainder=False ) # Use early stopping. early_stop = psiz.keras.callbacks.EarlyStoppingRe( 'val_cce', patience=15, mode='min', restore_best_weights=True ) callbacks = [early_stop] compile_kwargs = { 'loss': tf.keras.losses.CategoricalCrossentropy(), 'optimizer': tf.keras.optimizers.Adam(lr=.001), 'weighted_metrics': [ tf.keras.metrics.CategoricalCrossentropy(name='cce') ] } model_inferred = build_model(n_stimuli, n_dim, n_group) # Infer embedding with restarts. 
restarter = psiz.keras.Restarter( model_inferred, compile_kwargs=compile_kwargs, monitor='val_loss', n_restart=n_restart ) restart_record = restarter.fit( x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs, callbacks=callbacks, verbose=0 ) model_inferred = restarter.model # Compare the inferred model with ground truth by comparing the # similarity matrices implied by each model. simmat_truth = ( model_similarity(model_true, groups=[0]), model_similarity(model_true, groups=[1]), model_similarity(model_true, groups=[2]) ) simmat_inferred = ( model_similarity(model_inferred, groups=[0]), model_similarity(model_inferred, groups=[1]), model_similarity(model_inferred, groups=[2]) ) r_squared = np.empty((n_group, n_group)) for i_truth in range(n_group): for j_infer in range(n_group): rho, _ = pearsonr(simmat_truth[i_truth], simmat_inferred[j_infer]) r_squared[i_truth, j_infer] = rho**2 # Display attention weights. # Permute inferred dimensions to best match ground truth. attention_weight = tf.stack( [ model_inferred.kernel.subnets[0].distance.w, model_inferred.kernel.subnets[1].distance.w, model_inferred.kernel.subnets[2].distance.w ], axis=0 ).numpy() idx_sorted = np.argsort(-attention_weight[0, :]) attention_weight = attention_weight[:, idx_sorted] group_labels = ["Novice", "Intermediate", "Expert"] print("\n Attention weights:") for i_group in range(attention_weight.shape[0]): print(" {0:>12} | {1}".format( group_labels[i_group], np.array2string( attention_weight[i_group, :], formatter={'float_kind': lambda x: "%.2f" % x}) ) ) # Display comparison results. A good inferred model will have a high # R^2 value on the diagonal elements (max is 1) and relatively low R^2 # values on the off-diagonal elements. 
print('\n Model Comparison (R^2)') print(' ================================') print(' True | Inferred') print(' | Novice Interm Expert') print(' --------+-----------------------') print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format( r_squared[0, 0], r_squared[0, 1], r_squared[0, 2])) print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format( r_squared[1, 0], r_squared[1, 1], r_squared[1, 2])) print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format( r_squared[2, 0], r_squared[2, 1], r_squared[2, 2])) print('\n') def ground_truth(n_stimuli, n_dim, n_group): """Return a ground truth embedding.""" stimuli = tf.keras.layers.Embedding( n_stimuli+1, n_dim, mask_zero=True, embeddings_initializer=tf.keras.initializers.RandomNormal( stddev=.17 ) ) shared_similarity = psiz.keras.layers.ExponentialSimilarity( trainable=False, beta_initializer=tf.keras.initializers.Constant(10.), tau_initializer=tf.keras.initializers.Constant(1.), gamma_initializer=tf.keras.initializers.Constant(0.) ) # Define group-specific kernels. kernel_0 = psiz.keras.layers.DistanceBased( distance=psiz.keras.layers.Minkowski( rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.), w_initializer=tf.keras.initializers.Constant( [1.8, 1.8, .2, .2] ), w_constraint=psiz.keras.constraints.NonNegNorm( scale=n_dim, p=1. ), ), similarity=shared_similarity ) kernel_1 = psiz.keras.layers.DistanceBased( distance=psiz.keras.layers.Minkowski( rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.), w_initializer=tf.keras.initializers.Constant( [1., 1., 1., 1.] ), w_constraint=psiz.keras.constraints.NonNegNorm( scale=n_dim, p=1. ), ), similarity=shared_similarity ) kernel_2 = psiz.keras.layers.DistanceBased( distance=psiz.keras.layers.Minkowski( rho_trainable=False, rho_initializer=tf.keras.initializers.Constant(2.), w_initializer=tf.keras.initializers.Constant( [.2, .2, 1.8, 1.8] ), w_constraint=psiz.keras.constraints.NonNegNorm( scale=n_dim, p=1. 
            ),
        ),
        similarity=shared_similarity
    )

    # Gate that routes each trial to its group-specific kernel using the
    # group membership stored in column 0 of the `groups` input.
    kernel_group = psiz.keras.layers.GateMulti(
        subnets=[kernel_0, kernel_1, kernel_2], group_col=0
    )

    model = psiz.keras.models.Rank(
        stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
    )
    return model


def build_model(n_stimuli, n_dim, n_group):
    """Build model.

    Arguments:
        n_stimuli: Integer indicating the number of stimuli in the
            embedding.
        n_dim: Integer indicating the dimensionality of the embedding.
        n_group: Integer indicating the number of groups.
            NOTE(review): currently unused; the number of group kernels
            is hard-coded to 3 below — confirm intent.

    Returns:
        model: A TensorFlow Keras model.

    """
    # `mask_zero=True` reserves index 0 as a padding/mask token, hence
    # the n_stimuli+1 vocabulary size.
    stimuli = tf.keras.layers.Embedding(
        n_stimuli+1, n_dim, mask_zero=True,
    )

    # One frozen similarity function shared by all group kernels; only
    # the per-group attention weights (inside each kernel) are learned.
    shared_similarity = psiz.keras.layers.ExponentialSimilarity(
        trainable=False,
        beta_initializer=tf.keras.initializers.Constant(10.),
        tau_initializer=tf.keras.initializers.Constant(1.),
        gamma_initializer=tf.keras.initializers.Constant(0.)
    )

    kernel_0 = build_kernel(shared_similarity, n_dim)
    kernel_1 = build_kernel(shared_similarity, n_dim)
    kernel_2 = build_kernel(shared_similarity, n_dim)

    kernel_group = psiz.keras.layers.GateMulti(
        subnets=[kernel_0, kernel_1, kernel_2], group_col=0
    )

    model = psiz.keras.models.Rank(
        stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
    )
    return model


def build_kernel(similarity, n_dim):
    """Build kernel for single group.

    Arguments:
        similarity: A similarity layer shared across groups.
        n_dim: Integer dimensionality; used to scale the non-negative
            norm constraint on the attention weights.

    Returns:
        A DistanceBased kernel with trainable attention weights.

    """
    mink = psiz.keras.layers.Minkowski(
        rho_trainable=False,
        rho_initializer=tf.keras.initializers.Constant(2.),
        # Weights stay non-negative and sum (p=1 norm) to n_dim.
        w_constraint=psiz.keras.constraints.NonNegNorm(
            scale=n_dim, p=1.
        ),
    )

    kernel = psiz.keras.layers.DistanceBased(
        distance=mink,
        similarity=similarity
    )
    return kernel


def model_similarity(model, groups=[]):
    """Return the pairwise similarity matrix implied by `model`.

    NOTE(review): mutable default argument `groups=[]` — benign here
    since it is only read, but a `None` sentinel would be safer.
    """
    # ds_info is unused; only the pair dataset is needed.
    ds_pairs, ds_info = psiz.utils.pairwise_index_dataset(
        model.n_stimuli, mask_zero=True, groups=groups
    )
    simmat = psiz.utils.pairwise_similarity(
        model.stimuli, model.kernel, ds_pairs, use_group_kernel=True
    ).numpy()

    return simmat


if __name__ == "__main__":
    main()
examples/rank/mle_3g.py
10,845
Build kernel for single group. Build model. Arguments: n_stimuli: Integer indicating the number of stimuli in the embedding. n_dim: Integer indicating the dimensionality of the embedding. Returns: model: A TensorFlow Keras model. Return a ground truth embedding. Run the simulation that infers an embedding for three groups. Example that infers a shared embedding for three groups. Fake data is generated from a ground truth model for three different groups. In this example, these groups represent groups of agents with varying levels of skill: novices, intermediates, and experts. Each group has a different set of attention weights. An embedding model is inferred from the simulated data and compared to the ground truth model. Example output: Attention weights: Novice | [3.38 3.32 0.49 0.43] Intermediate | [2.06 2.18 2.04 2.18] Expert | [0.55 0.50 3.40 3.32] Model Comparison (R^2) ================================ True | Inferred | Novice Interm Expert --------+----------------------- Novice | 0.95 0.68 0.16 Interm | 0.64 0.96 0.54 Expert | 0.16 0.61 0.96 -*- coding: utf-8 -*- Copyright 2020 The PsiZ Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ noqa Uncomment the following line to force eager execution. tf.config.run_functions_eagerly(True) Uncomment and edit the following to control GPU visibility. os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0" Settings. 
Generate a random docket of trials to show each group. Create virtual agents for each group. Simulate similarity judgments for each group. Partition observations into 80% train, 10% validation and 10% test set. Convert to TF dataset. Use early stopping. Infer embedding with restarts. Compare the inferred model with ground truth by comparing the similarity matrices implied by each model. Display attention weights. Permute inferred dimensions to best match ground truth. Display comparison results. A good inferred model will have a high R^2 value on the diagonal elements (max is 1) and relatively low R^2 values on the off-diagonal elements. Define group-specific kernels.
2,806
en
0.821421
# Reverse the input array
# input : {5,4,3,2,1}
# output : {1,2,3,4,5}
#
# BUG FIX: the original called `arr.push(...)` (Python lists have no
# `push` method) and referenced an undefined name `a`; even with those
# repaired, appending while removing the same element does not reverse
# the list. Reversal is now done with a slice, and the logic lives in a
# testable function while stdin handling is kept under a main guard.


def reverse_list(values):
    """Return a new list with the elements of *values* in reverse order."""
    return values[::-1]


if __name__ == "__main__":
    arr = list(map(int, input().split()))
    print(reverse_list(arr))
Python/test.py
187
Reverse the input array input : {5,4,3,2,1} output : {1,2,3,4,5}
64
en
0.122725
import pandas as pd
import time
from google import google
import sys

from A00_File_name import file_name

# Input CSV uses ';' separators and latin-1 encoding.
file_df = pd.read_csv(file_name, sep=';', encoding='latin-1')
print(file_df.head())

brand_names_list = file_df['Official Chain Name'].tolist()

''' create a column with Official Brand WWWs '''
# Google scraping helper: https://github.com/abenassi/Google-Search-API

WWW = []
for index in range(len(brand_names_list)):
    # Search "<brand> <category> official website"; sleep between
    # queries, presumably to avoid being rate-limited — TODO confirm.
    search_results = google.search(str(brand_names_list[index]) + ' ' + str(file_df.iloc[index]['Category']) + " official website")
    time.sleep(3)
    result_nb = 0
    try:
        for i in range(len(search_results)):
            # Skip aggregator/irrelevant hits; keep the first other link.
            if "wiki" in str(search_results[i].link) or 'facebook' in str(search_results[i].link).lower() \
                    or 'stackoverflow' in str(search_results[i].link).lower():
                print(str(index), 'wiki or facebook or stackoverflow')
                pass
            else:
                print(search_results[i].link)
                # Keep only the scheme://host part of the URL.
                WWW.append("/".join(search_results[i].link.split("/", 3)[:3]))
                print(index, i)
                result_nb += 1
                break
        if result_nb == 0:
            WWW.append('[]')  # placeholder: no acceptable result found
    except OSError:
        WWW.append('Permission denial ' + str(sys.exc_info()[0]))
    except:
        # NOTE(review): bare except appends the exception *class* (not a
        # string) to the column — confirm this is intended.
        WWW.append(sys.exc_info()[0])

print(len(brand_names_list))
print(len(WWW))

''' create a column with .com domain '''


def create_www_brand_COM(brand_name):
    """Guess a '<brand>.com' domain: strip quotes, spaces, dots,
    ampersands and hyphens from the name, lowercase, append '.com'."""
    newstr = brand_name.replace("'", "")
    newstr = newstr.replace(" ", "")
    newstr = newstr.replace(".", "")
    newstr = newstr.replace("&", "")
    newstr = newstr.replace("-", "")
    newstr = newstr + '.com'
    newstr = newstr.lower()
    print(newstr)
    return newstr


brands_wwws = []
for name in file_df['Official Chain Name']:
    brands_wwws.append(create_www_brand_COM(name))
print(brands_wwws)

file_df['Official Web Page'] = WWW
file_df['.com Web Page'] = brands_wwws
print(file_df.head())

# Write next to the input file, swapping the '.csv' suffix.
file_df.to_csv(file_name[:-4] + '_URLs_from_WB.csv', sep=';')
A01_WEB_BROWSER_get_Official_WWWs_create_COM_domain.py
2,149
https://github.com/abenassi/Google-Search-API
45
en
0.531244
"""Representation of a WeMo Motion device.""" from .api.long_press import LongPressMixin from .switch import Switch class LightSwitch(Switch, LongPressMixin): """Representation of a WeMo Motion device.""" def __repr__(self): """Return a string representation of the device.""" return '<WeMo LightSwitch "{name}">'.format(name=self.name) @property def device_type(self): """Return what kind of WeMo this device is.""" return "LightSwitch"
pywemo/ouimeaux_device/lightswitch.py
490
Representation of a WeMo Motion device. Return a string representation of the device. Return what kind of WeMo this device is. Representation of a WeMo Motion device.
166
en
0.769214
import requests
import json


class BuddyAPI():
    '''
    An API client for buddymojo.com quizzes.

    :returns: An API
    '''

    def __init__(self):
        # Query params for the answer-submission POST.
        self.payload = {'type': 'friend', 'action': 'finish'}
        # Query params for the question-fetch GET; 'userQuizId' is
        # overwritten per request.
        self.payloadf = {'userQuizId': 1, 'type': 'friend', 'stats': '1'}
        self.url = 'https://cn.buddymojo.com/api/v1/quiz/18'
        self.match = 'https://cn.buddymojo.com/match/'

    def send_single_ans(self, ID, name: str):
        '''
        Send a single message to specific id with a specific name.

        :params ID: User quiz id.
        :type ID: int
        :params name: Name you want on the message.
        :type name: str
        '''
        self.data = {'userFullName': name, 'userQuizId': 1}
        self.data.update(userQuizId=ID)
        self.payloadf.update(userQuizId=ID)
        try:
            req = requests.request('GET', self.url, params=self.payloadf,
                                   timeout=10)
            questions = json.loads(req.text).get('data').get('questions')
            # Echo back each question's chosen option so every answer
            # matches.
            for j, q in enumerate(questions):
                qval = q.get('choosenOption')
                self.data.update(
                    {'questions[' + str(j) + '][choosenOption]': qval})
            requests.post(self.url, params=self.payload, data=self.data,
                          timeout=10)
            print('sending post to userQuizId: ' + str(ID))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            print('User not found')

    def send_range_ans(self, start, end, name: str):
        '''
        Send messages to a range of users id.

        :params start: The start user id.
        :type start: int
        :params end: The end user id.
        :type end: int
        :params name: The name you want.
        :type name: str
        '''
        for i in range(start, end):
            data = {'userFullName': name, 'userQuizId': 1}
            data.update(userQuizId=i)
            self.payloadf.update(userQuizId=i)
            try:
                req = requests.request('GET', self.url,
                                       params=self.payloadf, timeout=10)
                questions = json.loads(req.text).get('data').get('questions')
                for j, q in enumerate(questions):
                    qval = q.get('choosenOption')
                    data.update({'questions[' + str(j) + '][choosenOption]': qval})
                requests.post(self.url, params=self.payload, data=data,
                              timeout=10)
                print('sending post to userQuizId: ' + str(i))
            except Exception:
                # Best effort: skip ids that fail and continue the range.
                continue

    # Still working out
    def get_userQuizId(self, encUserQuizId):
        '''
        Returns a user id string of the encUserQuizId.
        '''
        try:
            # BUG FIX: the original referenced the bare name `match`
            # (NameError at runtime); it is an instance attribute.
            req = requests.request('GET', self.match + str(encUserQuizId),
                                   timeout=10)
            data = json.loads(req.text)
            print(data)
        except Exception:
            return 'User not found'

    def get_link(self, ID):
        '''
        Returns a url string of the id.

        :params ID: The id to get the url from.
        :type ID: int
        :returns: A url string, or 'User not found' on failure.
        :rtype: String
        '''
        self.payloadf.update(userQuizId=ID)
        try:
            req = requests.request('GET', self.url, params=self.payloadf,
                                   timeout=10)
            data = json.loads(req.text).get('data').get('encUserQuizId')
            return self.match + data
        except Exception:
            return 'User not found'
buddymojoAPI/BuddyMojoAPI.py
3,539
An API of buddymojo.com :returns: An API Returns a url string of the id. :params ID: The id to get the url from. :type ID: int :returns: A url string. :rtype: String Returns a user id string of the encUserQuizId. Send messages to a range of users id. :params start: The start user id. :type start: int :params end: The end user id. :type end: int :params name: The name you want. :type name: str Send a single message to specific id with a specific name. :params ID: User quiz id. :type ID: int :params name: Name you want on the message. :type name: str d = text.get('data') questions = d.get('questions') d = text.get('data') questions = d.get('questions') Still working out
684
en
0.542352
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for factory.py."""

# Import libraries
from absl.testing import parameterized
import tensorflow as tf

from official.vision.beta.configs import backbones
from official.vision.beta.configs import backbones_3d
from official.vision.beta.configs import image_classification as classification_cfg
from official.vision.beta.configs import maskrcnn as maskrcnn_cfg
from official.vision.beta.configs import retinanet as retinanet_cfg
from official.vision.beta.configs import video_classification as video_classification_cfg
from official.vision.beta.modeling import factory
from official.vision.beta.modeling import factory_3d


class ClassificationModelBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke tests: classification models build from configs without error."""

  @parameterized.parameters(
      ('resnet', (224, 224), 5e-5),
      ('resnet', (224, 224), None),
      ('resnet', (None, None), 5e-5),
      ('resnet', (None, None), None),
  )
  def test_builder(self, backbone_type, input_size, weight_decay):
    num_classes = 2
    # (None, None) spatial dims exercise dynamic input shapes.
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size[0], input_size[1], 3])
    model_config = classification_cfg.ImageClassificationModel(
        num_classes=num_classes,
        backbone=backbones.Backbone(type=backbone_type))
    l2_regularizer = (
        tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
    # Building the model *is* the assertion; config errors raise here.
    _ = factory.build_classification_model(
        input_specs=input_specs,
        model_config=model_config,
        l2_regularizer=l2_regularizer)


class MaskRCNNBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke tests: Mask R-CNN models build from configs without error."""

  @parameterized.parameters(
      ('resnet', (640, 640)),
      ('resnet', (None, None)),
  )
  def test_builder(self, backbone_type, input_size):
    num_classes = 2
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size[0], input_size[1], 3])
    model_config = maskrcnn_cfg.MaskRCNN(
        num_classes=num_classes,
        backbone=backbones.Backbone(type=backbone_type))
    l2_regularizer = tf.keras.regularizers.l2(5e-5)
    _ = factory.build_maskrcnn(
        input_specs=input_specs,
        model_config=model_config,
        l2_regularizer=l2_regularizer)


class RetinaNetBuilderTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke tests: RetinaNet builds, with and without attribute heads."""

  @parameterized.parameters(
      ('resnet', (640, 640), False),
      ('resnet', (None, None), True),
  )
  def test_builder(self, backbone_type, input_size, has_att_heads):
    num_classes = 2
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size[0], input_size[1], 3])
    if has_att_heads:
      attribute_heads_config = [
          retinanet_cfg.AttributeHead(name='att1'),
          retinanet_cfg.AttributeHead(
              name='att2', type='classification', size=2),
      ]
    else:
      attribute_heads_config = None
    model_config = retinanet_cfg.RetinaNet(
        num_classes=num_classes,
        backbone=backbones.Backbone(type=backbone_type),
        head=retinanet_cfg.RetinaNetHead(
            attribute_heads=attribute_heads_config))
    l2_regularizer = tf.keras.regularizers.l2(5e-5)
    _ = factory.build_retinanet(
        input_specs=input_specs,
        model_config=model_config,
        l2_regularizer=l2_regularizer)
    if has_att_heads:
      # 'att1' should have picked up the defaults (regression, size 1).
      self.assertEqual(model_config.head.attribute_heads[0].as_dict(),
                       dict(name='att1', type='regression', size=1))
      self.assertEqual(model_config.head.attribute_heads[1].as_dict(),
                       dict(name='att2', type='classification', size=2))


class VideoClassificationModelBuilderTest(parameterized.TestCase,
                                          tf.test.TestCase):
  """Smoke tests: video classification models build without error."""

  @parameterized.parameters(
      ('resnet_3d', (8, 224, 224), 5e-5),
      ('resnet_3d', (None, None, None), 5e-5),
  )
  def test_builder(self, backbone_type, input_size, weight_decay):
    # Input is 5-D: [batch, time, height, width, channels].
    input_specs = tf.keras.layers.InputSpec(
        shape=[None, input_size[0], input_size[1], input_size[2], 3])
    model_config = video_classification_cfg.VideoClassificationModel(
        backbone=backbones_3d.Backbone3D(type=backbone_type))
    l2_regularizer = (
        tf.keras.regularizers.l2(weight_decay) if weight_decay else None)
    _ = factory_3d.build_video_classification_model(
        input_specs=input_specs,
        model_config=model_config,
        num_classes=2,
        l2_regularizer=l2_regularizer)


if __name__ == '__main__':
  tf.test.main()
official/vision/beta/modeling/factory_test.py
5,019
Tests for factory.py. Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Lint as: python3 Import libraries
639
en
0.850785
from model import *
from data import *
from keras.preprocessing.image import ImageDataGenerator

# Pin training to a single GPU (device index 1).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Augmentation settings for the training generator.
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')
myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)

model = unet()
# Save the best weights (by training loss) after each epoch.
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=300,epochs=1,callbacks=[model_checkpoint])

# --- earlier batch-prediction experiments, kept for reference ---
# test_dir = "data/membrane/test"
# test_datagen = ImageDataGenerator(rescale=1./255)
# test_generator = test_datagen.flow_from_directory(
#         test_dir,
#         target_size=(256, 256),
#         color_mode="grayscale",
#         batch_size=1)

# test_path = "data/membrane/test"
# image_datagen = ImageDataGenerator(**data_gen_args)
# image_generator = image_datagen.flow_from_directory(
#     test_path,
#     class_mode = None,
#     color_mode = "grayscale",
#     target_size = (256,256),
#     batch_size = 1,
#     save_to_dir = None,
#     seed = 2)

# filenames = test_generator.filenames
# nb_samples = len(filenames)
# print(nb_samples)
# predict = model.predict_generator(test_generator,steps = nb_samples)

# testGene = testGenerator("data/membrane/test")
# filenames = testGene.filenames
# nb_samples = len(filenames)
# results = model.predict_generator(testGene,30,verbose=1)
# saveResult("data/membrane/test",results)

# Single-image prediction: load test image 30, normalise to [0, 1],
# resize, then add the channel and batch axes the network expects.
test_path = "data/membrane/test"
target_size = (256,256)
flag_multi_class = False
img = io.imread(os.path.join(test_path,"%d.png"%30),as_gray = True)
img = img / 255
img = trans.resize(img,target_size)
img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
img = np.reshape(img,(1,)+img.shape)
results = model.predict(img)
print(results)

# NOTE(review): Sky, Building, ... are presumably star-imported from
# `data` — confirm they are defined there.
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
                          Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])

saveResult("data/membrane/test",results)
#io.imsave(os.path.join(save_path,"%d_predict.png"%31),results)

# testGene = testGenerator("data/membrane/test")
# results = model.predict_generator(testGene,31)
# saveResult("data/membrane/test",results)
main.py
2,463
test_dir = "data/membrane/test" test_datagen = ImageDataGenerator(rescale=1./255) test_generator = test_datagen.flow_from_directory( test_dir, target_size=(256, 256), color_mode="grayscale", batch_size=1) test_path = "data/membrane/test" image_datagen = ImageDataGenerator(**data_gen_args) image_generator = image_datagen.flow_from_directory( test_path, class_mode = None, color_mode = "grayscale", target_size = (256,256), batch_size = 1, save_to_dir = None, seed = 2) filenames = test_generator.filenames nb_samples = len(filenames) print(nb_samples) predict = model.predict_generator(test_generator,steps = nb_samples) testGene = testGenerator("data/membrane/test") filenames = testGene.filenames nb_samples = len(filenames) results = model.predict_generator(testGene,30,verbose=1) saveResult("data/membrane/test",results)io.imsave(os.path.join(save_path,"%d_predict.png"%31),results) testGene = testGenerator("data/membrane/test") results = model.predict_generator(testGene,31) saveResult("data/membrane/test",results)
1,110
en
0.226079
# © Copyright IBM Corporation 2020. # # LICENSE: Apache License 2.0 (Apache-2.0) # http://www.apache.org/licenses/LICENSE-2.0 """ ... """ # init file # import cython created shared object files import sib.c_package # cython with cpp version # import core functionality from .sib_main import * from ._version import get_versions __version__ = get_versions()['version'] del get_versions
src/sib/__init__.py
393
... © Copyright IBM Corporation 2020. LICENSE: Apache License 2.0 (Apache-2.0) http://www.apache.org/licenses/LICENSE-2.0 init file import cython created shared object files cython with cpp version import core functionality
225
en
0.555048
#!/usr/bin/python3 from sys import version_info from setuptools import setup if version_info < (3, 5, 3): raise RuntimeError("aiopm requires Python 3.5.3+") setup( name='aiopm', version='1.1', description='Async Postmark client (asyncio)', classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Operating System :: POSIX', # 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Development Status :: 4 - Beta', # 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', ], author='Vitold Sedyshev', author_email='vit1251@gmail.com', maintainer=', '.join([ 'Vitold Sedyshev <vit1251@gmail.com>', ]), maintainer_email='aiopm@googlegroups.com', url='https://github.com/vit1251/aiopm', project_urls={ # 'CI: Travis': '...', # 'Coverage: codecov': '...', # 'GitHub: issues': '', # 'GitHub: repo': '', }, license='MIT', packages=['aiopm'], python_requires='>=3.5.3', install_requires=['aiohttp'], include_package_data=True, )
setup.py
1,384
!/usr/bin/python3 'Operating System :: MacOS :: MacOS X', 'Development Status :: 5 - Production/Stable', 'CI: Travis': '...', 'Coverage: codecov': '...', 'GitHub: issues': '', 'GitHub: repo': '',
237
en
0.285201
import halide as hl import simple_stub import complex_stub def _realize_and_check(f, offset = 0): b = hl.Buffer(hl.Float(32), [2, 2]) f.realize(b) assert b[0, 0] == 3.5 + offset + 123 assert b[0, 1] == 4.5 + offset + 123 assert b[1, 0] == 4.5 + offset + 123 assert b[1, 1] == 5.5 + offset + 123 def test_simple(gen): x, y = hl.Var(), hl.Var() target = hl.get_jit_target_from_environment() b_in = hl.Buffer(hl.UInt(8), [2, 2]) b_in.fill(123) f_in = hl.Func("f") f_in[x, y] = x + y # ----------- Inputs by-position f = gen(target, b_in, f_in, 3.5) _realize_and_check(f) # ----------- Inputs by-name f = gen(target, buffer_input=b_in, func_input=f_in, float_arg=3.5) _realize_and_check(f) f = gen(target, float_arg=3.5, buffer_input=b_in, func_input=f_in) _realize_and_check(f) # ----------- Above set again, w/ GeneratorParam mixed in k = 42 # (positional) f = gen(target, b_in, f_in, 3.5, offset=k) _realize_and_check(f, k) # (keyword) f = gen(target, offset=k, buffer_input=b_in, func_input=f_in, float_arg=3.5) _realize_and_check(f, k) f = gen(target, buffer_input=b_in, offset=k, func_input=f_in, float_arg=3.5) _realize_and_check(f, k) f = gen(target, buffer_input=b_in, func_input=f_in, offset=k, float_arg=3.5) _realize_and_check(f, k) f = gen(target, buffer_input=b_in, float_arg=3.5, func_input=f_in, offset=k) _realize_and_check(f, k) # ----------- Test various failure modes try: # Inputs w/ mixed by-position and by-name f = gen(target, b_in, f_in, float_arg=3.5) except RuntimeError as e: assert 'Cannot use both positional and keyword arguments for inputs.' in str(e) else: assert False, 'Did not see expected exception!' try: # too many positional args f = gen(target, b_in, f_in, 3.5, 4) except RuntimeError as e: assert 'Expected exactly 3 positional args for inputs, but saw 4.' in str(e) else: assert False, 'Did not see expected exception!' 
try: # too few positional args f = gen(target, b_in, f_in) except RuntimeError as e: assert 'Expected exactly 3 positional args for inputs, but saw 2.' in str(e) else: assert False, 'Did not see expected exception!' try: # Inputs that can't be converted to what the receiver needs (positional) f = gen(target, hl.f32(3.141592), "happy", k) except RuntimeError as e: assert 'Unable to cast Python instance' in str(e) else: assert False, 'Did not see expected exception!' try: # Inputs that can't be converted to what the receiver needs (named) f = gen(target, b_in, f_in, float_arg="bogus") except RuntimeError as e: assert 'Unable to cast Python instance' in str(e) else: assert False, 'Did not see expected exception!' try: # Input specified by both pos and kwarg f = gen(target, b_in, f_in, 3.5, float_arg=4.5) except RuntimeError as e: assert "Cannot use both positional and keyword arguments for inputs." in str(e) else: assert False, 'Did not see expected exception!' try: # Bad input name f = gen(target, buffer_input=b_in, float_arg=3.5, offset=k, funk_input=f_in) except RuntimeError as e: assert "Expected exactly 3 keyword args for inputs, but saw 2." in str(e) else: assert False, 'Did not see expected exception!' try: # Bad gp name f = gen(target, buffer_input=b_in, float_arg=3.5, offset=k, func_input=f_in, nonexistent_generator_param="wat") except RuntimeError as e: assert "has no GeneratorParam named: nonexistent_generator_param" in str(e) else: assert False, 'Did not see expected exception!' 
def test_looplevel(gen): x, y = hl.Var('x'), hl.Var('y') target = hl.get_jit_target_from_environment() buffer_input = hl.Buffer(hl.UInt(8), [4, 4]) buffer_input.fill(123) func_input = hl.Func("func_input") func_input[x, y] = x + y simple_compute_at = hl.LoopLevel() simple = gen(target, buffer_input, func_input, 3.5, compute_level=simple_compute_at) computed_output = hl.Func('computed_output') computed_output[x, y] = simple[x, y] + 3 simple_compute_at.set(hl.LoopLevel(computed_output, x)) _realize_and_check(computed_output, 3) def _make_constant_image(): constant_image = hl.Buffer(hl.UInt(8), [32, 32, 3], 'constant_image') for x in range(32): for y in range(32): for c in range(3): constant_image[x, y, c] = x + y + c return constant_image def test_complex(gen): constant_image = _make_constant_image() input = hl.ImageParam(hl.UInt(8), 3, 'input') input.set(constant_image) x, y, c = hl.Var(), hl.Var(), hl.Var() target = hl.get_jit_target_from_environment() float_arg = 1.25 int_arg = 33 func_input = hl.Func("func_input") func_input[x, y, c] = hl.u16(x + y + c) r = gen(target, typed_buffer_input=constant_image, untyped_buffer_input=constant_image, simple_input=input, array_input=[ input, input ], float_arg=float_arg, int_arg=[ int_arg, int_arg ], untyped_buffer_output_type="uint8", extra_func_input=func_input, vectorize=True) # return value is a tuple; unpack separately to avoid # making the callsite above unreadable (simple_output, tuple_output, array_output, typed_buffer_output, untyped_buffer_output, static_compiled_buffer_output, scalar_output, extra_func_output) = r b = simple_output.realize([32, 32, 3], target) assert b.type() == hl.Float(32) for x in range(32): for y in range(32): for c in range(3): expected = constant_image[x, y, c] actual = b[x, y, c] assert expected == actual, "Expected %s Actual %s" % (expected, actual) b = tuple_output.realize([32, 32, 3], target) assert b[0].type() == hl.Float(32) assert b[1].type() == hl.Float(32) assert len(b) == 2 for x 
in range(32): for y in range(32): for c in range(3): expected1 = constant_image[x, y, c] * float_arg expected2 = expected1 + int_arg actual1, actual2 = b[0][x, y, c], b[1][x, y, c] assert expected1 == actual1, "Expected1 %s Actual1 %s" % (expected1, actual1) assert expected2 == actual2, "Expected2 %s Actual1 %s" % (expected2, actual2) assert len(array_output) == 2 for a in array_output: b = a.realize([32, 32], target) assert b.type() == hl.Int(16) for x in range(32): for y in range(32): expected = constant_image[x, y, 0] + int_arg actual = b[x, y] assert expected == actual, "Expected %s Actual %s" % (expected, actual) # TODO: Output<Buffer<>> has additional behaviors useful when a Stub # is used within another Generator; this isn't yet implemented since there # isn't yet Python bindings for Generator authoring. This section # of the test may need revision at that point. b = typed_buffer_output.realize([32, 32, 3], target) assert b.type() == hl.Float(32) for x in range(32): for y in range(32): for c in range(3): expected = constant_image[x, y, c] actual = b[x, y, c] assert expected == actual, "Expected %s Actual %s" % (expected, actual) b = untyped_buffer_output.realize([32, 32, 3], target) assert b.type() == hl.UInt(8) for x in range(32): for y in range(32): for c in range(3): expected = constant_image[x, y, c] actual = b[x, y, c] assert expected == actual, "Expected %s Actual %s" % (expected, actual) b = static_compiled_buffer_output.realize([4, 4, 1], target) assert b.type() == hl.UInt(8) for x in range(4): for y in range(4): for c in range(1): expected = constant_image[x, y, c] + 42 actual = b[x, y, c] assert expected == actual, "Expected %s Actual %s" % (expected, actual) b = scalar_output.realize([], target) assert b.type() == hl.Float(32) assert b[()] == 34.25 b = extra_func_output.realize([32, 32], target) assert b.type() == hl.Float(64) for x in range(32): for y in range(32): expected = x + y + 1 actual = b[x, y] assert expected == actual, "Expected %s 
Actual %s" % (expected, actual) if __name__ == "__main__": test_simple(simple_stub.generate) test_looplevel(simple_stub.generate) test_complex(complex_stub.generate)
python_bindings/correctness/pystub.py
9,020
----------- Inputs by-position ----------- Inputs by-name ----------- Above set again, w/ GeneratorParam mixed in (positional) (keyword) ----------- Test various failure modes Inputs w/ mixed by-position and by-name too many positional args too few positional args Inputs that can't be converted to what the receiver needs (positional) Inputs that can't be converted to what the receiver needs (named) Input specified by both pos and kwarg Bad input name Bad gp name return value is a tuple; unpack separately to avoid making the callsite above unreadable TODO: Output<Buffer<>> has additional behaviors useful when a Stub is used within another Generator; this isn't yet implemented since there isn't yet Python bindings for Generator authoring. This section of the test may need revision at that point.
804
en
0.873322
from logger import elog, mlog, alog from db_engine import mysql_connect, mysql_reconnect, get_qs, \ estr, valid_pass, SQLParamError, sql_selectall, \ sql_insertinto, do_param_error, sq, sql_update import random, time, json, os, os.path, sys, math, types from utils import * from math import * from auth import do_auth, gen_token, toktypes, rot_userid, unrot_userid import datetime from config import * from db_engine import * import db_engine import base64 import os, os.path, sys, stat from fileapi import file_restricted_fields, FOLDER_MIME, EMPTY_TAG, ROOT_PARENT_ID import urllib #stupid unicode! def jsondumps(obj): if type(obj) in [int, float, long]: return str(obj); elif type(obj) in [list, tuple, set]: s = "[" for i, item in enumerate(obj): if i > 0: s += ", " s += jsondumps(item) s += "]" return s elif type(obj) == dict: s = "{" for i, k in enumerate(obj): if i > 0: s += ", " s += '"' + k + '" : ' s += jsondumps(obj[k]) s += "}" return s; else: #XXX type(obj) == str: return '"' + str(obj) + '"' #else: # raise RuntimeError("unknown object " + str(type(obj))); WIN32 = sys.platform.startswith("win") if not WIN32: #unix functions; need to test these! 
def unixnorm(path): #strip out '.', so ./path works while path[0] == ".": path = path[1:] return path def listdir(path): path = unixnorm(path) return os.listdir(path) def exists(path): path = unixnorm(path) return os.path.exists(path) def dostat(path): path = unixnorm(path) return os.stat(path) def local_to_real(path): path = unixnorm(path) if path == "/.settings.bin": print("APPDATA", get_appdata()) #os.environ["APPDATA"]) dir = get_appdata() + os.path.sep + ".fairmotion" #os.path.join(get_appdata(), "/.fairmotion") if not os.path.exists(dir): print("make dirs", dir) os.makedirs(dir) path = os.path.join(dir, ".settings.bin") print("DIRPATH", dir) print("PATH", path) if not os.path.exists(path): templ = config.server_root + "/default_settings_bin" f = open(templ, "rb") buf = f.read() f.close() f = open(path, "wb") f.write(buf) f.close() return os.path.abspath(os.path.normpath(path)) if not serv_all_local: path = files_root + os.path.sep + path return os.path.abspath(os.path.normpath(path)) def real_to_local(path): path = unixnorm(path) if os.path.abspath(os.path.normpath(path)) == unixnorm(local_to_real("/.settings.bin")): return "/.settings.bin" path = os.path.abspath(os.path.normpath(path)) froot = os.path.abspath(os.path.normpath(files_root)) path = path[len(froot):].replace(os.path.sep, "/") return path if WIN32: import win_util local_to_real = win_util.local_to_real real_to_local = win_util.real_to_local listdir = win_util.listdir dostat = win_util.dostat exists = win_util.exists get_appdata = win_util.get_appdata else: def get_appdata(): return os.environ["HOME"] FOLDER_MIME = "application/vnd.google-apps.folder" import fileapi_db ROOT_PARENT_ID = fileapi_db.ROOT_PARENT_ID def is_folder(file): return file.mimeType == FOLDER_MIME or file.id == ROOT_PARENT_ID def is_valid_file(file): return file["realpath"] != EMPTY_TAG try: a = FileNotFoundError except: FileNotFoundError = OSError class FileClass (dict): def __init__(self, path, userid): print(" FCLS PATH", 
path, userid) path = os.path.normpath(path).replace(os.path.sep, "/") diskpath = local_to_real(path) froot = local_to_real("/") if not os.path.exists(diskpath): self.bad = True return else: try: nstat = dostat(diskpath) except: self.bad = True return rootid = fileapi_db.fileid_to_publicid(userid, ROOT_PARENT_ID) if stat.S_ISDIR(nstat.st_mode): mime = FOLDER_MIME self.is_dir = True else: mime = "application/x-javascript" self.is_dir = False self.name = "" self.bad = False if not serv_all_local and not diskpath.startswith(froot): elog("Error! " + diskpath) print("Error!", diskpath, froot) self.bad = True return self.diskpath = diskpath self.mimeType = mime self.id = fileid_to_publicid(path, userid) #print("Final relative path:", path, len(froot)); oname = path while len(oname) > 0 and oname[0] in ["\\", "/"]: oname = oname[1:] name = oname[oname.rfind("/")+1:].strip() name = name.replace("/", "") if name == "": name = oname self.name = name #print("Final name:", self.name) parentpath = path[:path.rfind("/")].strip() if "/" not in path: parentpath = "/" #print("PARENT PATH", "'"+parentpath+"'", fileid_to_publicid(parentpath, userid)) if name == "/" or parentpath == "/" or parentpath == "": self.parentid = rootid else: self.parentid = fileid_to_publicid(parentpath, userid) def File(path, userid): f = FileClass(path, userid) if f.bad: return None return f #for local serving, encode file path as the id def fileid_to_publicid(path, userid): if ".." in path: return "-1" path = bytes(path, "latin-1") path = str(base64.b64encode(path), "latin-1") return path def publicid_to_fileid(publicid): if len(publicid) == 17: userid, fileid = fileapi_db.publicid_to_fileid(publicid) if fileid == ROOT_PARENT_ID: return "/" if publicid == "/": return publicid #print(":::", publicid) path = base64.b64decode(bytes(publicid, "latin-1")); path = str(path, "latin-1") if ".." 
in path: return "-1" return path class FileAPI_DirList: basepath = "/api/files/dir/list" def __init__(self): pass def do_GET(self, serv): qs = get_qs(serv.path) if "accessToken" not in qs or ("path" not in qs and "id" not in qs): serv.send_error(400) return tok = qs["accessToken"][0] userid = do_auth(tok) if userid == None: elog("Invalid access in file api") serv.send_error(401) return if "id" in qs: path = publicid_to_fileid(qs["id"][0]) else: path = qs["path"][0] path = urllib.unquote(path).strip(); print("PATHPATH", path); dir = File(path, userid) if ".." in path: serv.send_error(401) return if not serv_all_local: prefix = files_root#+rot_userid(userid) try: os.makedirs(prefix) except FileExistsError: pass dirpath = local_to_real(path) files = [] for f in listdir(dirpath): path2 = path + os.path.sep + f file = File(path2, userid) f = {} if file == None: continue print("error!", dirpath) #if file == None: continue f["name"] = file.name f["id"] = file.id f["mimeType"] = file.mimeType f["is_dir"] = 1 if file.is_dir else 0 f["parentid"] = file.parentid files.append(f) body = jsondumps({"items": files}) body = bstr(body) serv.gen_headers("GET", len(body), json_mimetype) serv.wfile.write(body) class FileAPI_MakeFolder: basepath = "/api/files/dir/new" def __init__(self): pass def do_GET(self, serv): qs = get_qs(serv.path) if "name" not in qs or "accessToken" not in qs or ("path" not in qs and "id" not in qs): serv.send_error(400) return if ".." 
in qs["name"][0]: serv.send_error(403) return tok = qs["accessToken"][0] userid = do_auth(tok) if userid == None: serv.send_error(401) return if "id" in qs: folderid = publicid_to_fileid(qs["id"][0]) else: folderid = qs["path"][0] if folderid == None: serv.send_error(400) return path = local_to_real(folderid + "/" + qs["name"][0]) print("PATH", path, exists(path)) #see if folder (or a file) already exists if exists(path): serv.send_error(400) return os.makedirs(path) body = json.dumps({"success": True}) body = bstr(body) serv.gen_headers("GET", len(body), json_mimetype) serv.wfile.write(body) class FileAPI_GetMeta: basepath = "/api/files/get/meta" def __init__(self): pass def do_POST(self, serv): buf = serv.rfile.read() try: obj = json.loads(buf) except: self.send_error(401) return def do_GET(self, serv): qs = get_qs(serv.path) if "accessToken" not in qs or ("path" not in qs and "id" not in qs): serv.send_error(400) return tok = qs["accessToken"][0] userid = do_auth(tok) if userid == None: serv.send_error(401) return if "path" in qs: fileid = qs["path"][0] fileid = urllib.unquote(fileid); else: fileid = qs["id"][0] path = local_to_real(fileid); if not os.path.exists(path): serv.send_error(404); return st = os.stat(path) fname = fileid.replace("\\", "/").strip() dir = "" if "/" in fname and fname[-1] != "/": dir = fname[:fname.rfind("/")].strip() fname = fname[len(dir):] while fname[0] == "/": fname = fname[1:] #ROOT_PARENT_ID mime = "unknown" if stat.S_ISDIR(st.st_mode): mime = FOLDER_MIME else: pass #deal with later #stupid quoting #id = urllib.quote(fileid, "").strip() id = fileid_to_publicid(fileid, userid).strip() #if id[0] == "'" or id[0] == "\"" and id[0] == id[-1]: f = { 'name' : fname, 'id' : id, 'parentid' : dir, 'mimeType' : mime, 'modified' : st.st_mtime, 'is_dir' : stat.S_ISDIR(st.st_mode) }; f2 = {} for k in f: if k in file_restricted_fields: continue f2[k] = f[k] body = json.dumps(f2) body = bstr(body) serv.gen_headers("GET", len(body), json_mimetype) 
serv.wfile.write(body) class UploadStatus: def __init__(self, uploadToken=None): self.invalid = False if uploadToken != None: self.from_sql(uploadToken) def from_sql(self, utoken): cur, con = mysql_connect() try: qstr = sql_selectall("uploadtokens", ["tokenid"], [utoken], [sq.token]) except SQLParamError: do_param_error("UploadToken.from_sql") raise SQLParamError() #qstr = "SELECT * FROM uploadtokens WHERE tokenid="+estr(utoken) cur.execute(qstr) ret = cur.fetchone() if ret == None: self.invalid = True return self.token = ret["tokenid"] self.path = ret["path"] self.time = ret["time"] self.name = ret["name"] self.fileid = ret["fileid"] self.realpath = ret["realpath"] self.userid = ret["userid"] self.permissions = ret["permissions"] self.expiration = ret["expiration"] self.size = ret["size"] self.cur = ret["cur"] def toJSON(self): obj = {} for k in this.__dict__: val = getattr(self, k) if type(val) in [types.MethodType, types.FunctionType]: continue obj[k] = getattr(self, k) return obj def commit(self): cur, con = mysql_connect() dnow = datetime.datetime.now() dend = datetime.datetime.now()+datetime.timedelta(days=1) types = [sq.token, sq.path, sq.datetime, sq.int ] cols = ["tokenid", "path", "time", "fileid" ] values = [self.token, self.path, dnow, 32423423] #we don't use database fileids in local mode types += [sq.str(100), sq.path, sq.int, sq.int ] cols += ["name", "realpath", "userid", "permissions"] values += [self.name, self.realpath, self.userid, 0 ] types += [sq.datetime, sq.int, sq.int ] cols += ["expiration", "size", "cur" ] values += [dend, self.size, self.cur] try: qstr = sql_insertinto("uploadtokens", cols, values, types) except SQLParamError: #do_param_error(json.dumps(self)); raise SQLParamError("upload token error; see error.log for details") print("QSTR", qstr) cur.execute(qstr) con.commit() def create(self, token, path, userid, fileid, parentid=ROOT_PARENT_ID): self.token = token self.path = path cs = os.path.split(path) self.dir = cs[0]; self.time 
= time.time(); self.size = -1 self.cur = 0 self.file = None self.file_init = False self.fileid = fileid self.userid = userid; self.parentid = parentid; #note: not cached in database if len(cs) == 1 or cs[1] == "" or cs[1] == None: self.name = cs[0] else: self.name = cs[1] self.gen_realpath() def gen_realpath(self): f = File(self.fileid, self.userid) fpath = os.path.split(f.diskpath)[0] if not os.path.exists(fpath): os.makedirs(fpath) self.realpath = f.diskpath return f.diskpath class FileAPI_UploadStart: basepath = "/api/files/upload/start" def __init__(self): pass def do_GET(self, serv): elog("fileapi access" + serv.path) qs = get_qs(serv.path) if "accessToken" not in qs or ("path" not in qs and "id" not in qs): serv.send_error(400) return tok = qs["accessToken"][0] userid = do_auth(tok) if userid == None: elog("Need user id") print("Bad auth") serv.send_error(401) return path = qs["path"][0] if "id" in qs: fileid = publicid_to_fileid(qs["id"][0]) else: fileid = urllib.unquote(path) meta = File(fileid, userid) if meta != None: print("DISKPATH", meta.diskpath) if meta == None or not os.path.exists(meta.diskpath): elog("creating new file") cs = os.path.split(path) folderid = cs[0] f = File(folderid, userid) if not os.path.exists(f.diskpath): elog("invalid folder " + f.diskpath) print("invalid folder " + f.diskpath) serv.send_error(401); return if len(cs) == 1 or cs[1] == "": fname = cs[0] else: fname = cs[1] mime = "application/octet-stream" #create empty file f = open(f.diskpath+"/"+fname, "w") f.close() meta = File(fileid, userid) if meta == None: elog("Invalid file id") serv.send_error(400) return print("\n\nFILE", meta, "\n\n") if is_folder(meta): elog("target file is a folder" + meta["name"]) serv.send_error(401) return utoken = gen_token("U", userid); ustatus = UploadStatus() #ignore fileid/parentid in upload status token ustatus.create(utoken, path, userid, fileid, -1) try: ustatus.commit() except: import traceback elog("USTATUS.COMMIT failed!") 
traceback.print_exc() f = open(ustatus.realpath, "w"); f.close(); realpath = ustatus.realpath body = json.dumps({"uploadToken" : utoken}); body = bstr(body) print("\nupload start result:", body, "\n\n\n") serv.gen_headers("GET", len(body), json_mimetype) serv.wfile.write(body) cur_uploads = {} class FileAPI_UploadChunk: basepath = "/api/files/upload" def __init__(self): pass def do_PUT(self, serv): alog("fileapi access" + serv.path) qs = get_qs(serv.path) if "accessToken" not in qs or "uploadToken" not in qs: elog("fileapi: invalid tokens") serv.send_error(400) return tok = qs["accessToken"][0] utoken = qs["uploadToken"][0] userid = do_auth(tok) if userid == None: elog("invalid authorization") serv.send_error(401) return status = UploadStatus(utoken) if status.invalid: elog("invalid upload token ", utoken) serv.send_error(401) return if "Content-Range" not in serv.headers: elog("missing header " + json.dumps(serv.headers)) serv.send_error(400) return r = serv.headers["Content-Range"].strip() if not r.startswith("bytes"): elog("malformed request 1") serv.send_error(400) return r = r[len("bytes"):].strip() r = r.split("/") if r == None or len(r) != 2: elog("malformed request 2") serv.send_error(400) return try: max_size = int(r[1]) except ValueError: elog("malformed request 3") serv.send_error(400) return r = r[0].split("-") if r == None or len(r) != 2: elog("malformed request 4") serv.send_error(400) return try: r = [int(r[0]), int(r[1])] except ValueError: elog("malformed request 4") serv.send_error(400) return if r[0] < 0 or r[1] < 0 or r[0] >= max_size or r[1] >= max_size \ or r[0] > r[1]: elog("malformed request 5") serv.send_error(400) return if status.size == -1: status.size = max_size buflen = r[1]-r[0]+1 if serv.rfile == None: elog("serv.rfile was None! eek! 
" + str(buflen)); serv.send_error(500) return; buf = serv.rfile.read(buflen) if len(buf) != buflen: elog("malformed request 6") serv.send_error(400) return if r[0] == 0: mode = "wb" else: mode = "ab" status.file = open(status.realpath, mode); status.file.seek(r[0]); status.file.write(buf); status.file.flush() status.file.close() status.commit() body = json.dumps({"success" : True}); body = bstr(body) serv.gen_headers("PUT", len(body), json_mimetype) serv.wfile.write(body) class FileAPI_GetFile: basepath = "/api/files/get" def __init__(self): pass def do_GET(self, serv): qs = get_qs(serv.path) if "accessToken" not in qs or ("path" not in qs and "id" not in qs): serv.send_error(400) return tok = qs["accessToken"][0] userid = do_auth(tok) if userid == None: serv.send_error(401) return if "path" in qs: path = qs["path"][0] else: path = publicid_to_fileid(qs["id"][0]) if path == None: serv.send_error(404) return alog("fetching file %s" % path); f = File(path, userid) if f == None: serv.send_error(400) return if is_folder(f): serv.send_error(401) return print("diskpath:", f.diskpath) try: file = open(f.diskpath, "rb") except OSError: serv.send_error(404) return body = file.read() file.close() serv.gen_headers("GET", len(body), "application/octet-stream") serv.send_header("Content-Disposition", "attachment; filename=\"%s\"" % f.name) #Content-Disposition: attachment; filename=FILENAME serv.wfile.write(body)
pyserver/fileapi_local.py
19,629
stupid unicode!XXX type(obj) == str:else: raise RuntimeError("unknown object " + str(type(obj)));unix functions; need to test these!strip out '.', so ./path worksos.environ["APPDATA"])os.path.join(get_appdata(), "/.fairmotion")print("Final relative path:", path, len(froot));print("Final name:", self.name)print("PARENT PATH", "'"+parentpath+"'", fileid_to_publicid(parentpath, userid))for local serving, encode file path as the id print(":::", publicid)+rot_userid(userid)if file == None: continuesee if folder (or a file) already existsROOT_PARENT_ID deal with laterstupid quotingid = urllib.quote(fileid, "").strip()if id[0] == "'" or id[0] == "\"" and id[0] == id[-1]:qstr = "SELECT * FROM uploadtokens WHERE tokenid="+estr(utoken)we don't use database fileids in local modedo_param_error(json.dumps(self));note: not cached in databasecreate empty fileignore fileid/parentid in upload status tokenContent-Disposition: attachment; filename=FILENAME
958
en
0.468302
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.utils.translation import ugettext as _, ugettext_lazy as _t from desktop.lib.conf import Config, validate_path SPARK_MASTER = Config( key="spark_master", help=_t("Address of the Spark master, e.g spark://localhost:7077. If empty use the current configuration. " "Can be overriden in the script too."), default="" ) SPARK_HOME = Config( key="spark_home", help=_t("Local path to Spark Home on all the nodes of the cluster."), default="/usr/lib/spark" ) def config_validator(user): res = [] res.extend(validate_path(SPARK_HOME, is_dir=True)) return res
apps/spark/src/spark/conf.py
1,391
!/usr/bin/env python Licensed to Cloudera, Inc. under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Cloudera, Inc. licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
759
en
0.868888
""" Module holds JMX handlers implementations Copyright 2017 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import re from distutils.version import LooseVersion from lxml import etree from bzt import TaurusInternalException, TaurusConfigError from bzt.engine import Scenario from bzt.jmx import JMX from bzt.jmx.base import cond_int from bzt.jmx.threadgroups import ThreadGroup, ConcurrencyThreadGroup, ThreadGroupHandler from bzt.requests_model import RequestVisitor, has_variable_pattern, HierarchicRequestParser from bzt.utils import iteritems, numeric_types from bzt.utils import BetterDict, dehumanize_time, ensure_is_dict, load_class, guess_delimiter class RequestCompiler(RequestVisitor): def __init__(self, jmx_builder): super(RequestCompiler, self).__init__() self.jmx_builder = jmx_builder def visit_mqttrequest(self, request): return self.jmx_builder.compile_request(request) def visit_hierarchichttprequest(self, request): return self.jmx_builder.compile_request(request) def visit_ifblock(self, block): return self.jmx_builder.compile_if_block(block) def visit_onceblock(self, block): return self.jmx_builder.compile_once_block(block) def visit_loopblock(self, block): return self.jmx_builder.compile_loop_block(block) def visit_whileblock(self, block): return self.jmx_builder.compile_while_block(block) def visit_foreachblock(self, block): return self.jmx_builder.compile_foreach_block(block) def visit_transactionblock(self, block): return 
self.jmx_builder.compile_transaction_block(block) def visit_includescenarioblock(self, block): scenario_name = block.scenario_name if scenario_name in self.path: msg = "Mutual recursion detected in include-scenario blocks (scenario %s)" raise TaurusConfigError(msg % scenario_name) self.record_path(scenario_name) return self.jmx_builder.compile_include_scenario_block(block) def visit_actionblock(self, block): return self.jmx_builder.compile_action_block(block) def visit_setvariables(self, block): return self.jmx_builder.compile_set_variables_block(block) class LoadSettingsProcessor(object): TG = ThreadGroup.__name__ CTG = ConcurrencyThreadGroup.__name__ def __init__(self, executor): self.log = executor.log.getChild(self.__class__.__name__) self.load = executor.get_specific_load() self.raw_load = executor.get_raw_load() self.log.debug("Load: %s", self.load) self.force_ctg = executor.settings.get("force-ctg", True) self.tg = self._detect_thread_group(executor) self.tg_handler = ThreadGroupHandler(self.log) def _detect_thread_group(self, executor): """ Detect preferred thread group :param executor: :return: """ tg = self.TG if not self.force_ctg: return tg msg = 'Thread group detection: %s, regular ThreadGroup will be used' if not self.load.duration: self.log.debug(msg, 'duration not found') elif self.load.iterations: self.log.debug(msg, 'iterations are found') elif not executor.tool: msg = 'You must set executor tool (%s) for choosing of ConcurrencyThreadGroup' raise TaurusInternalException(msg % executor.tool_name) elif not executor.tool.ctg_plugin_installed(): self.log.warning(msg % 'plugin for ConcurrentThreadGroup not found') else: tg = self.CTG return tg def modify(self, jmx, is_jmx_generated=False): if not (self.raw_load.iterations or self.raw_load.concurrency or self.load.duration): self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped') return # IMPORTANT: fix groups order as changing of element type changes order of 
getting of groups groups = list(self.tg_handler.groups(jmx)) # user concurrency is jmeter variable, write it to tg as is if isinstance(self.load.concurrency, str): target_list = [(group, self.load.concurrency) for group in groups] else: # concurrency is numeric or empty raw = self.load.concurrency is None # keep existed concurrency if self.load.concurrency is omitted concurrency_list = [] for group in groups: concurrency = group.get_concurrency(raw=raw) if concurrency is None: concurrency = 1 concurrency_list.append(concurrency) if not raw: # divide numeric concurrency self._divide_concurrency(concurrency_list) target_list = zip(groups, concurrency_list) for group, concurrency in target_list: iterations = None existed_tg = (not is_jmx_generated) and (group.gtype == self.TG) if not self.force_ctg and existed_tg: iterations = group.get_iterations() self.tg_handler.convert(source=group, target_gtype=self.tg, load=self.load, concurrency=concurrency, iterations=iterations) if self.load.throughput: self._add_shaper(jmx) if self.tg == self.TG and self.load.steps: self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup") def _divide_concurrency(self, concurrency_list): """ calculate target concurrency for every thread group """ total_old_concurrency = sum(concurrency_list) for idx, concurrency in enumerate(concurrency_list): if total_old_concurrency and concurrency_list[idx] != 0: part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency concurrency_list[idx] = int(round(part_of_load)) if concurrency_list[idx] == 0: concurrency_list[idx] = 1 else: concurrency_list[idx] = 0 total_new_concurrency = sum(concurrency_list) leftover = self.load.concurrency - total_new_concurrency if leftover < 0: msg = "Had to add %s more threads to maintain thread group proportion" self.log.warning(msg, -leftover) elif leftover > 0: msg = "%s threads left undistributed due to thread group proportion" self.log.warning(msg, leftover) def _add_shaper(self, 
jmx): """ Add shaper :param jmx: JMX :return: """ if not self.load.duration: self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option") return etree_shaper = jmx.get_rps_shaper() if self.load.ramp_up: if isinstance(self.load.throughput, numeric_types) and self.load.duration: start_rps = self.load.throughput / float(self.load.duration) start_rps = max(start_rps, 0.001) # avoid zeroing start_rps = min(start_rps, 1.0) # avoid starting too fast else: start_rps = 1 if not self.load.steps: jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up) else: step_h = self.load.throughput / self.load.steps step_w = float(self.load.ramp_up) / self.load.steps accum_time = 0 for step in range(1, self.load.steps + 1): jmx.add_rps_shaper_schedule(etree_shaper, step_h * step, step_h * step, step_w * step - accum_time) accum_time += cond_int(step_w * step - accum_time) if self.load.hold: jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold) jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper) jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree")) class ProtocolHandler(object): def __init__(self, sys_props, engine): super(ProtocolHandler, self).__init__() self.system_props = sys_props self.engine = engine def get_toplevel_elements(self, scenario): return [] def get_sampler_pair(self, request): return None, None @staticmethod def safe_time(any_time): try: smart_time = int(1000 * dehumanize_time(any_time)) except TaurusInternalException: smart_time = any_time return smart_time class JMeterScenarioBuilder(JMX): """ Helper to build JMeter test plan from Scenario :type protocol_handlers: dict[str,ProtocolHandler] """ def __init__(self, executor, original=None): """ :type executor: ScenarioExecutor :type original: JMX """ super(JMeterScenarioBuilder, self).__init__(original) self.executor = executor self.scenario = executor.get_scenario() 
self.engine = executor.engine self.system_props = BetterDict() self.request_compiler = None self.default_protocol = self.executor.settings.get('default-protocol', 'http') self.protocol_handlers = {} for protocol, cls_name in iteritems(self.executor.settings.get("protocol-handlers")): cls_obj = load_class(cls_name) instance = cls_obj(self.system_props, self.engine) self.protocol_handlers[protocol] = instance self.FIELD_KEYSTORE_CONFIG = 'keystore-config' @staticmethod def _get_timer(req): think_time = req.get_think_time(full=True) if not think_time: return [] if not isinstance(think_time, list): # constant return JMX.get_constant_timer(delay=ProtocolHandler.safe_time(think_time)) mean = ProtocolHandler.safe_time(think_time[1]) dev = ProtocolHandler.safe_time(think_time[2]) if think_time[0] == "uniform": return JMX.get_uniform_timer(maximum=dev * 2, offset=mean - dev) elif think_time[0] == "gaussian": return JMX.get_gaussian_timer(dev=dev, offset=mean) elif think_time[0] == "poisson": return JMX.get_poisson_timer(lam=mean - dev, delay=dev) else: raise TaurusConfigError("Wrong timer type: %s" % think_time[0]) def __add_extractors(self, children, req): self.__add_boundary_ext(children, req) self.__add_regexp_ext(children, req) self.__add_json_ext(children, req) self.__add_jquery_ext(children, req) self.__add_xpath_ext(children, req) def __add_boundary_ext(self, children, req): extractors = req.config.get("extract-boundary") for varname, cfg in iteritems(extractors): subj = cfg.get('subject', 'body') left = cfg.get('left', TaurusConfigError("Left boundary is missing for boundary extractor %s" % varname)) right = cfg.get('right', TaurusConfigError("Right boundary is missing for boundary extractor %s" % varname)) match_no = cfg.get('match-no', 1) defvalue = cfg.get('default', 'NOT_FOUND') scope = cfg.get("scope", None) from_var = cfg.get("from-variable", None) extractor = JMX._get_boundary_extractor(varname, subj, left, right, match_no, defvalue, scope, from_var) 
children.append(extractor) children.append(etree.Element("hashTree")) def __add_regexp_ext(self, children, req): extractors = req.config.get("extract-regexp") for varname in extractors: cfg = ensure_is_dict(extractors, varname, "regexp") scope = cfg.get("scope", None) from_var = cfg.get("from-variable", None) extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'], cfg.get('template', 1), cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'), scope, from_var) children.append(extractor) children.append(etree.Element("hashTree")) def __add_json_ext(self, children, req): jextractors = req.config.get("extract-jsonpath") for varname in jextractors: cfg = ensure_is_dict(jextractors, varname, "jsonpath") if LooseVersion(str(self.executor.settings.get("version"))) < LooseVersion("3.0"): extractor = JMX._get_json_extractor(varname, cfg["jsonpath"], cfg.get("default", "NOT_FOUND"), cfg.get("from-variable", None)) else: extractor = JMX._get_internal_json_extractor(varname, cfg["jsonpath"], cfg.get("default", "NOT_FOUND"), cfg.get("scope", None), cfg.get("from-variable", None), cfg.get("match-no", "0"), cfg.get("concat", False)) children.append(extractor) children.append(etree.Element("hashTree")) def __add_jquery_ext(self, children, req): css_jquery_extors = req.config.get("extract-css-jquery") for varname in css_jquery_extors: cfg = ensure_is_dict(css_jquery_extors, varname, "expression") extractor = self._get_jquerycss_extractor(varname, cfg['expression'], cfg.get('attribute', ""), cfg.get('match-no', 0), cfg.get('default', 'NOT_FOUND'), cfg.get("scope", None), cfg.get("from-variable", None)) children.append(extractor) children.append(etree.Element("hashTree")) def __add_xpath_ext(self, children, req): xpath_extractors = req.config.get("extract-xpath") for varname in xpath_extractors: cfg = ensure_is_dict(xpath_extractors, varname, "xpath") children.append(JMX._get_xpath_extractor(varname, cfg['xpath'], cfg.get('default', 'NOT_FOUND'), 
cfg.get('validate-xml', False), cfg.get('ignore-whitespace', True), cfg.get("match-no", "-1"), cfg.get('use-namespaces', False), cfg.get('use-tolerant-parser', False), cfg.get("scope", None), cfg.get("from-variable", None))) children.append(etree.Element("hashTree")) @staticmethod def __add_assertions(children, req): assertions = req.config.get("assert", []) for idx, assertion in enumerate(assertions): assertion = ensure_is_dict(assertions, idx, "contains") if not isinstance(assertion['contains'], list): assertion['contains'] = [assertion['contains']] children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY), assertion['contains'], assertion.get('regexp', True), assertion.get('not', False), assertion.get('assume-success', False))) children.append(etree.Element("hashTree")) jpath_assertions = req.config.get("assert-jsonpath", []) for idx, assertion in enumerate(jpath_assertions): assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath") exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion) component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc), assertion.get('expected-value', ''), assertion.get('validate', False), assertion.get('expect-null', False), assertion.get('invert', False), assertion.get('regexp', True)) children.append(component) children.append(etree.Element("hashTree")) xpath_assertions = req.config.get("assert-xpath", []) for idx, assertion in enumerate(xpath_assertions): assertion = ensure_is_dict(xpath_assertions, idx, "xpath") exc = TaurusConfigError('XPath not found in assertion: %s' % assertion) component = JMX._get_xpath_assertion(assertion.get('xpath', exc), assertion.get('validate-xml', False), assertion.get('ignore-whitespace', True), assertion.get('use-tolerant-parser', False), assertion.get('invert', False)) children.append(component) children.append(etree.Element("hashTree")) @staticmethod def __add_jsr_elements(children, req, get_from_config=True): """ :type children: 
etree.Element :type req: Request """ jsrs = [] if get_from_config: jsrs = req.config.get("jsr223", []) else: jsrs = req.get("jsr223", []) if not isinstance(jsrs, list): jsrs = [jsrs] for idx, _ in enumerate(jsrs): jsr = ensure_is_dict(jsrs, idx, sub_key='script-text') lang = jsr.get("language", "groovy") script_file = jsr.get("script-file", None) script_text = jsr.get("script-text", None) if not script_file and not script_text: raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'") parameters = jsr.get("parameters", "") execute = jsr.get("execute", "after") cache_key = str(jsr.get("compile-cache", True)).lower() children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key)) children.append(etree.Element("hashTree")) def __gen_requests(self, scenario): http_protocol = scenario.data.get('protocol', 'http') == 'http' requests = scenario.get_requests(parser=HierarchicRequestParser, require_url=http_protocol) elements = [] for compiled in self.compile_requests(requests): elements.extend(compiled) return elements def compile_scenario(self, scenario): elements = [] for _, protocol in iteritems(self.protocol_handlers): elements.extend(protocol.get_toplevel_elements(scenario)) elements.extend(self.__gen_authorization(scenario)) elements.extend(self.__gen_keystore_config(scenario)) elements.extend(self.__gen_data_sources(scenario)) elements.extend(self.__gen_requests(scenario)) self.__add_jsr_elements(elements, scenario, False) return elements def compile_request(self, request): """ :type request: HierarchicHTTPRequest :return: """ sampler = children = None protocol_name = request.priority_option('protocol', default=self.default_protocol) if protocol_name in self.protocol_handlers: protocol = self.protocol_handlers[protocol_name] sampler, children = protocol.get_sampler_pair(request) if sampler is None: self.log.warning("Problematic request: %s", request.config) raise 
TaurusInternalException("Unable to handle request, please review missing options") children.extend(self._get_timer(request)) self.__add_assertions(children, request) self.__add_extractors(children, request) self.__add_jsr_elements(children, request) return [sampler, children] def compile_if_block(self, block): elements = [] if_controller = JMX._get_if_controller(block.condition) then_children = etree.Element("hashTree") for compiled in self.compile_requests(block.then_clause): for element in compiled: then_children.append(element) elements.extend([if_controller, then_children]) if block.else_clause: inverted_condition = "!(" + block.condition + ")" else_controller = JMX._get_if_controller(inverted_condition) else_children = etree.Element("hashTree") for compiled in self.compile_requests(block.else_clause): for element in compiled: else_children.append(element) elements.extend([else_controller, else_children]) return elements def compile_once_block(self, block): elements = [] once_controller = JMX._get_once_controller() children = etree.Element("hashTree") for compiled in self.compile_requests(block.requests): for element in compiled: children.append(element) elements.extend([once_controller, children]) return elements def compile_loop_block(self, block): elements = [] loop_controller = JMX._get_loop_controller(block.loops) children = etree.Element("hashTree") for compiled in self.compile_requests(block.requests): for element in compiled: children.append(element) elements.extend([loop_controller, children]) return elements def compile_while_block(self, block): elements = [] controller = JMX._get_while_controller(block.condition) children = etree.Element("hashTree") for compiled in self.compile_requests(block.requests): for element in compiled: children.append(element) elements.extend([controller, children]) return elements def compile_foreach_block(self, block): """ :type block: ForEachBlock """ elements = [] controller = JMX._get_foreach_controller(block.input_var, 
block.loop_var) children = etree.Element("hashTree") for compiled in self.compile_requests(block.requests): for element in compiled: children.append(element) elements.extend([controller, children]) return elements def compile_transaction_block(self, block): elements = [] controller = JMX._get_transaction_controller(block.label, block.priority_option('force-parent-sample', False), block.include_timers) children = etree.Element("hashTree") for compiled in self.compile_requests(block.requests): for element in compiled: children.append(element) elements.extend([controller, children]) return elements def compile_include_scenario_block(self, block): elements = [] controller = JMX._get_simple_controller(block.scenario_name) children = etree.Element("hashTree") scenario = self.executor.get_scenario(name=block.scenario_name) for element in self.compile_scenario(scenario): children.append(element) elements.extend([controller, children]) return elements def compile_action_block(self, block): """ :type block: ActionBlock :return: """ actions = { 'stop': 0, 'pause': 1, 'stop-now': 2, 'continue': 3, } targets = {'current-thread': 0, 'all-threads': 2} action = actions[block.action] target = targets[block.target] duration = 0 if block.duration is not None: duration = int(block.duration * 1000) test_action = JMX._get_action_block(action, target, duration) children = etree.Element("hashTree") self.__add_jsr_elements(children, block) return [test_action, children] @staticmethod def compile_set_variables_block(block): set_var_action = JMX.get_set_var_action(block.mapping) hashtree = etree.Element("hashTree") return [set_var_action, hashtree] def compile_requests(self, requests): if self.request_compiler is None: self.request_compiler = RequestCompiler(self) compiled = [] for request in requests: compiled.append(self.request_compiler.visit(request)) self.request_compiler.clear_path_cache() return compiled def __generate(self): """ Generate the test plan """ thread_group = 
JMX.get_thread_group(testname=self.executor.label) thread_group_ht = etree.Element("hashTree", type="tg") self.request_compiler = RequestCompiler(self) for element in self.compile_scenario(self.scenario): thread_group_ht.append(element) results_tree = self._get_results_tree() results_tree_ht = etree.Element("hashTree") self.append(self.TEST_PLAN_SEL, thread_group) self.append(self.TEST_PLAN_SEL, thread_group_ht) self.append(self.TEST_PLAN_SEL, results_tree) self.append(self.TEST_PLAN_SEL, results_tree_ht) def save(self, filename): """ Generate test plan and save :type filename: str """ self.__generate() super(JMeterScenarioBuilder, self).save(filename) @staticmethod def __gen_authorization(scenario): """ Generates HTTP Authorization Manager """ elements = [] authorizations = scenario.get("authorization") if authorizations: clear_flag = False if isinstance(authorizations, dict): if "clear" in authorizations or "list" in authorizations: # full form clear_flag = authorizations.get("clear", False) authorizations = authorizations.get("list", []) else: authorizations = [authorizations] # short form if not isinstance(authorizations, list): raise TaurusConfigError("Wrong authorization format: %s" % authorizations) auth_manager = JMX.get_auth_manager(authorizations, clear_flag) elements.append(auth_manager) elements.append(etree.Element("hashTree")) return elements def __gen_data_sources(self, scenario): elements = [] for source in scenario.get_data_sources(): source_path = source["path"] delimiter = source.get("delimiter") if has_variable_pattern(source_path): msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s" self.log.warning(msg, source_path) if not delimiter: delimiter = ',' self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter) else: source_path = self.executor.engine.find_file(source_path) if not os.path.isfile(source_path): raise TaurusConfigError("data-sources path not found: %s" % source_path) 
if not delimiter: delimiter = guess_delimiter(source_path) if source.get("random-order"): config = JMX._get_csv_config_random(source_path, delimiter, source.get("loop", True), source.get("variable-names", "")) else: config = JMX._get_csv_config(source_path, delimiter, source.get("loop", True), source.get("variable-names", ""), source.get("quoted", False)) elements.append(config) elements.append(etree.Element("hashTree")) return elements def __gen_keystore_config(self, scenario): elements = [] keystore_config = scenario.get(self.FIELD_KEYSTORE_CONFIG) if keystore_config: variable_name = keystore_config["variable-name"] start_index = keystore_config["start-index"] end_index = keystore_config["end-index"] preload = keystore_config["preload"] config = JMX.get_keystore_config_elements(variable_name, start_index, end_index, preload) elements.append(config) elements.append(etree.Element("hashTree")) return elements
bzt/jmx/tools.py
30,218
Helper to build JMeter test plan from Scenario :type protocol_handlers: dict[str,ProtocolHandler] :type children: etree.Element :type req: Request Generates HTTP Authorization Manager Generate the test plan :type executor: ScenarioExecutor :type original: JMX Add shaper :param jmx: JMX :return: Detect preferred thread group :param executor: :return: calculate target concurrency for every thread group :type block: ActionBlock :return: :type block: ForEachBlock :type request: HierarchicHTTPRequest :return: Generate test plan and save :type filename: str Module holds JMX handlers implementations Copyright 2017 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. IMPORTANT: fix groups order as changing of element type changes order of getting of groups user concurrency is jmeter variable, write it to tg as is concurrency is numeric or empty keep existed concurrency if self.load.concurrency is omitted divide numeric concurrency avoid zeroing avoid starting too fast constant full form short form
1,496
en
0.78949
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys import socket import struct import asyncio @asyncio.coroutine def proxy_data(reader, writer): try: while 1: buf = yield from reader.read(4096) if not buf: break writer.write(buf) yield from writer.drain() writer.close() except Exception: pass class SOCKS5Server: def __init__(self, host, port, username=None, password=None, *, loop=None): self.username = self.password = None if username and password: self.username = username.encode('utf-8') self.password = password.encode('utf-8') self.loop = loop or asyncio.get_event_loop() coro = asyncio.start_server(self.handle_socks, host, port, loop=self.loop) self.server = self.loop.run_until_complete(coro) # for random ports self.host, self.port = self.server.sockets[0].getsockname()[:2] def handle_socks(self, reader, writer): version, authnum = yield from reader.read(2) if version != 0x05: writer.close() return methods = yield from reader.read(authnum) if self.username and 0x02 in methods: # Username/password writer.write(b'\x05\x02') version, ulen = yield from reader.read(2) username = yield from reader.read(ulen) ulen = (yield from reader.read(1))[0] password = yield from reader.read(ulen) if version == 0x01 and ( username == self.username and password == self.password): writer.write(b'\x01\x00') else: writer.write(b'\x01\xFF') writer.close() return elif self.username is None and 0x00 in methods: # No authentication writer.write(b'\x05\x00') else: writer.write(b'\x05\xFF') writer.close() return try: version, command, reserved, addrtype = yield from reader.read(4) except ValueError: writer.close() return if version != 0x05: writer.close() return if addrtype == 0x01: host = yield from reader.read(4) hostname = socket.inet_ntop(socket.AF_INET, host) elif addrtype == 0x03: length = (yield from reader.read(1))[0] hostname = (yield from reader.read(length)).decode('utf-8') elif addrtype == 0x04: host = yield from reader.read(16) hostname = socket.inet_ntop(socket.AF_INET6, 
host) port = struct.unpack('!H', (yield from reader.read(2)))[0] sockname = writer.get_extra_info('sockname') # a (address, port) 2-tuple for AF_INET, # a (address, port, flow info, scope id) 4-tuple for AF_INET6 if len(sockname) == 2: bndinfo = b'\x01' + socket.inet_pton(socket.AF_INET, sockname[0]) else: bndinfo = b'\x04' + socket.inet_pton(socket.AF_INET6, sockname[0]) bndinfo += struct.pack('!H', sockname[1]) if command == 0x01: writer.write(b'\x05\x00\x00' + bndinfo) else: writer.write(b'\x05\x07\x00' + bndinfo) writer.close() return r_reader, r_writer = yield from asyncio.open_connection(hostname, port) asyncio.ensure_future(proxy_data(reader, r_writer), loop=self.loop) asyncio.ensure_future(proxy_data(r_reader, writer), loop=self.loop) def run_forever(self): self.loop.run_forever() def close(self): self.server.close() self.loop.run_until_complete(self.server.wait_closed()) if __name__ == '__main__': try: host = '0.0.0.0' port = 1080 if len(sys.argv) == 1: pass elif len(sys.argv) == 2: if sys.argv[1] in ('-h', '--help'): print('usage: python3 %s [port|listen port]' % __file__) sys.exit(0) else: port = int(sys.argv[1]) elif len(sys.argv) == 3: host = sys.argv[1] port = int(sys.argv[2]) except Exception as ex: print(ex) print('usage: python3 %s [port|listen port]' % sys.argv[0]) sys.exit(1) srv = SOCKS5Server(host, port) print('Listening on %s:%d' % (host, port)) try: srv.run_forever() except KeyboardInterrupt: pass finally: srv.close()
socksserver.py
4,574
!/usr/bin/env python3 -*- coding: utf-8 -*- for random ports Username/password No authentication a (address, port) 2-tuple for AF_INET, a (address, port, flow info, scope id) 4-tuple for AF_INET6
195
en
0.593343
# coding: utf-8 """ Properties All HubSpot objects store data in default and custom properties. These endpoints provide access to read and modify object properties in HubSpot. # noqa: E501 The version of the OpenAPI document: v3 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from hubspot.crm.properties.configuration import Configuration class PropertyGroupUpdate(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'label': 'str', 'display_order': 'int' } attribute_map = { 'label': 'label', 'display_order': 'displayOrder' } def __init__(self, label=None, display_order=None, local_vars_configuration=None): # noqa: E501 """PropertyGroupUpdate - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._label = None self._display_order = None self.discriminator = None if label is not None: self.label = label if display_order is not None: self.display_order = display_order @property def label(self): """Gets the label of this PropertyGroupUpdate. # noqa: E501 A human-readable label that will be shown in HubSpot. # noqa: E501 :return: The label of this PropertyGroupUpdate. # noqa: E501 :rtype: str """ return self._label @label.setter def label(self, label): """Sets the label of this PropertyGroupUpdate. A human-readable label that will be shown in HubSpot. # noqa: E501 :param label: The label of this PropertyGroupUpdate. # noqa: E501 :type: str """ self._label = label @property def display_order(self): """Gets the display_order of this PropertyGroupUpdate. 
# noqa: E501 Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501 :return: The display_order of this PropertyGroupUpdate. # noqa: E501 :rtype: int """ return self._display_order @display_order.setter def display_order(self, display_order): """Sets the display_order of this PropertyGroupUpdate. Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501 :param display_order: The display_order of this PropertyGroupUpdate. # noqa: E501 :type: int """ self._display_order = display_order def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PropertyGroupUpdate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, PropertyGroupUpdate): return True return self.to_dict() != other.to_dict()
hubspot/crm/properties/models/property_group_update.py
4,705
NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Returns true if both objects are equal PropertyGroupUpdate - a model defined in OpenAPI Returns true if both objects are not equal For `print` and `pprint` Gets the display_order of this PropertyGroupUpdate. # noqa: E501 Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501 :return: The display_order of this PropertyGroupUpdate. # noqa: E501 :rtype: int Sets the display_order of this PropertyGroupUpdate. Property groups are displayed in order starting with the lowest positive integer value. Values of -1 will cause the property group to be displayed after any positive values. # noqa: E501 :param display_order: The display_order of this PropertyGroupUpdate. # noqa: E501 :type: int Gets the label of this PropertyGroupUpdate. # noqa: E501 A human-readable label that will be shown in HubSpot. # noqa: E501 :return: The label of this PropertyGroupUpdate. # noqa: E501 :rtype: str Sets the label of this PropertyGroupUpdate. A human-readable label that will be shown in HubSpot. # noqa: E501 :param label: The label of this PropertyGroupUpdate. # noqa: E501 :type: str Returns the model properties as a dict Returns the string representation of the model Properties All HubSpot objects store data in default and custom properties. These endpoints provide access to read and modify object properties in HubSpot. # noqa: E501 The version of the OpenAPI document: v3 Generated by: https://openapi-generator.tech coding: utf-8 noqa: F401 noqa: E501 noqa: E501
1,741
en
0.605958
from __future__ import print_function import sys import logging import os os.environ['ENABLE_CNNL_TRYCATCH'] = 'OFF' # pylint: disable=C0413 from itertools import product import unittest import torch import torch_mlu.core.mlu_model as ct cur_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.append(cur_dir + "/../../") from common_utils import testinfo, TestCase # pylint: disable=C0413,C0411 logging.basicConfig(level=logging.DEBUG) class TestTypeOp(TestCase): # @unittest.skip("not test") @testinfo() def test_type_param_empty(self): shape_list = [(512, 1024, 2, 2, 4), (2, 3, 4), (254, 254, 112, 1, 1, 3), (1000,), ()] dtype_list = [torch.half, torch.float, torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.bool] for shape, src_type in product(shape_list, dtype_list): if src_type in [torch.half, torch.float]: x = torch.randn(shape, dtype=src_type) elif src_type == torch.uint8: x = torch.randint(0, 255, shape).to(src_type) else: x = torch.randint(-128, 128, shape).to(src_type) out_cpu_type = x.type() out_mlu_type = x.to(ct.mlu_device()).type() l_tmp = out_cpu_type.split('.') l_tmp.insert(1, 'mlu') self.assertEqual('.'.join(l_tmp), out_mlu_type) # @unittest.skip("not test") @testinfo() def test_type_param_empty_channels_last(self): shape_list = [(512, 1024, 2, 2), (2, 3, 4, 5), (254, 254, 112, 1), (2, 3, 24, 30), (1, 1, 1, 30)] dtype_list = [torch.half, torch.float, torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.bool] for shape, src_type in product(shape_list, dtype_list): if src_type in [torch.half, torch.float]: x = torch.randn(shape, dtype=src_type).to(memory_format = torch.channels_last) elif src_type == torch.uint8: x = torch.randint(0, 255, shape).to(src_type).to( memory_format = torch.channels_last) else: x = torch.randint(-128, 128, shape).to(src_type).to( memory_format = torch.channels_last) out_cpu_type = x.type() out_mlu_type = x.to(ct.mlu_device()).type() l_tmp = out_cpu_type.split('.') l_tmp.insert(1, 'mlu') 
self.assertEqual('.'.join(l_tmp), out_mlu_type) # @unittest.skip("not test") @testinfo() def test_type_param_empty_not_dense(self): shape_list = [(16, 32, 2, 30), (2, 3, 4, 32), (24, 26, 112, 64), (2, 3, 24, 30), (1, 1, 1, 30)] dtype_list = [torch.half, torch.float, torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.bool] for shape, src_type in product(shape_list, dtype_list): if src_type in [torch.half, torch.float]: x = torch.randn(shape, dtype=src_type)[:, :, :, :15] elif src_type == torch.uint8: x = torch.randint(0, 255, shape).to(src_type)[:, :, :, :15] else: x = torch.randint(-128, 128, shape).to(src_type)[:, :, :, :15] out_cpu_type = x.type() out_mlu_type = x.to(ct.mlu_device()).type() l_tmp = out_cpu_type.split('.') l_tmp.insert(1, 'mlu') self.assertEqual('.'.join(l_tmp), out_mlu_type) # @unittest.skip("not test") @testinfo() def test_type_param_dtype(self): shape_list = [(512, 1024, 2, 2, 4), (2, 3, 4), (254, 254, 112, 1, 1, 3), (1000,), ()] cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool}, torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool}, torch.long: {torch.float, torch.half, torch.short, torch.int8}, torch.int: {torch.float, torch.half, torch.short, torch.int8}, torch.short: {torch.float, torch.half, torch.int}, torch.int8: {torch.float, torch.half, torch.int}, torch.uint8: {torch.float, torch.half}, torch.bool: {torch.float, torch.half, torch.int}, } for shape, src_type in product(shape_list, cast_map.keys()): for dst_type in cast_map[src_type]: if src_type in [torch.half, torch.float]: x = torch.randn(shape, dtype=src_type) elif src_type == torch.uint8: x = torch.randint(0, 255, shape).to(src_type) else: x = torch.randint(-128, 128, shape).to(src_type) for is_async in [False, True]: out_cpu = x.type(dst_type, non_blocking=is_async) out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async) self.assertEqual(out_mlu.dtype, dst_type) self.assertEqual(out_cpu, 
out_mlu.cpu()) # @unittest.skip("not test") @testinfo() def test_type_param_dtype_channels_last(self): shape_list = [(512, 1024, 2, 2), (2, 3, 4, 16), (254, 254, 112, 1), (2, 3, 24, 30), (1, 1, 1, 30)] cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool}, torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool}, torch.long: {torch.float, torch.half, torch.short, torch.int8}, torch.int: {torch.float, torch.half, torch.short, torch.int8}, torch.short: {torch.float, torch.half, torch.int}, torch.int8: {torch.float, torch.half, torch.int}, torch.uint8: {torch.float, torch.half}, torch.bool: {torch.float, torch.half, torch.int}, } for shape, src_type in product(shape_list, cast_map.keys()): for dst_type in cast_map[src_type]: if src_type in [torch.half, torch.float]: x = torch.randn(shape, dtype=src_type).to(memory_format = torch.channels_last) elif src_type == torch.uint8: x = torch.randint(0, 255, shape).to(src_type).to( memory_format = torch.channels_last) else: x = torch.randint(-128, 128, shape).to(src_type).to( memory_format = torch.channels_last) for is_async in [False, True]: out_cpu = x.type(dst_type, non_blocking=is_async) out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async) self.assertEqual(out_mlu.dtype, dst_type) self.assertEqual(out_cpu, out_mlu.cpu()) # @unittest.skip("not test") @testinfo() def test_type_param_dtype_not_dense(self): shape_list = [(16, 32, 2, 30), (2, 3, 4, 32), (24, 26, 112, 64), (2, 3, 24, 30), (1, 1, 1, 30)] cast_map = {torch.float: {torch.half, torch.int, torch.short, torch.int8, torch.bool}, torch.half: {torch.float, torch.int, torch.short, torch.int8, torch.bool}, torch.long: {torch.float, torch.half, torch.short, torch.int8}, torch.int: {torch.float, torch.half, torch.short, torch.int8}, torch.short: {torch.float, torch.half, torch.int}, torch.int8: {torch.float, torch.half, torch.int}, torch.uint8: {torch.float, torch.half}, torch.bool: {torch.float, torch.half, 
torch.int}, } for shape, src_type in product(shape_list, cast_map.keys()): for dst_type in cast_map[src_type]: if src_type in [torch.half, torch.float]: x = torch.randn(shape, dtype=src_type)[:, :, :, :15] elif src_type == torch.uint8: x = torch.randint(0, 255, shape).to(src_type)[:, :, :, :15] else: x = torch.randint(-128, 128, shape).to(src_type)[:, :, :, :15] for is_async in [False, True]: out_cpu = x.type(dst_type, non_blocking=is_async) out_mlu = x.to(ct.mlu_device()).type(dst_type, non_blocking=is_async) self.assertEqual(out_mlu.dtype, dst_type) self.assertEqual(out_cpu, out_mlu.cpu()) if __name__ == '__main__': unittest.main()
test/cnnl/op_test/test_type.py
8,765
pylint: disable=C0413 pylint: disable=C0413,C0411 @unittest.skip("not test") @unittest.skip("not test") @unittest.skip("not test") @unittest.skip("not test") @unittest.skip("not test") @unittest.skip("not test")
211
en
0.130675
# Copyright (C) 2001-2006 Python Software Foundation # Author: Barry Warsaw # Contact: email-sig@python.org """email package exception classes.""" class MessageError(Exception): """Base class for errors in the email package.""" class MessageParseError(MessageError): """Base class for message parsing errors.""" class HeaderParseError(MessageParseError): """Error while parsing headers.""" class BoundaryError(MessageParseError): """Couldn't find terminating boundary.""" class MultipartConversionError(MessageError, TypeError): """Conversion to a multipart is prohibited.""" class CharsetError(MessageError): """An illegal charset was given.""" # These are parsing defects which the parser was able to work around. class MessageDefect(ValueError): """Base class for a message defect.""" def __init__(self, line=None): if line is not None: super().__init__(line) self.line = line class NoBoundaryInMultipartDefect(MessageDefect): """A message claimed to be a multipart but had no boundary parameter.""" class StartBoundaryNotFoundDefect(MessageDefect): """The claimed start boundary was never found.""" class CloseBoundaryNotFoundDefect(MessageDefect): """A start boundary was found, but not the corresponding close boundary.""" class FirstHeaderLineIsContinuationDefect(MessageDefect): """A message had a continuation line as its first header line.""" class MisplacedEnvelopeHeaderDefect(MessageDefect): """A 'Unix-from' header was found in the middle of a header block.""" class MissingHeaderBodySeparatorDefect(MessageDefect): """Found line with no leading whitespace and no colon before blank line.""" # XXX: backward compatibility, just in case (it was never emitted). 
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect class MultipartInvariantViolationDefect(MessageDefect): """A message claimed to be a multipart but no subparts were found.""" class InvalidMultipartContentTransferEncodingDefect(MessageDefect): """An invalid content transfer encoding was set on the multipart itself.""" class UndecodableBytesDefect(MessageDefect): """Header contained bytes that could not be decoded""" class InvalidBase64PaddingDefect(MessageDefect): """base64 encoded sequence had an incorrect length""" class InvalidBase64CharactersDefect(MessageDefect): """base64 encoded sequence had characters not in base64 alphabet""" # These errors are specific to header parsing. class HeaderDefect(MessageDefect): """Base class for a header defect.""" def __init__(self, *args, **kw): super().__init__(*args, **kw) class InvalidHeaderDefect(HeaderDefect): """Header is not valid, message gives details.""" class HeaderMissingRequiredValue(HeaderDefect): """A header that must have a value had none""" class NonPrintableDefect(HeaderDefect): """ASCII characters outside the ascii-printable range found""" def __init__(self, non_printables): super().__init__(non_printables) self.non_printables = non_printables def __str__(self): return ("the following ASCII non-printables found in header: " "{}".format(self.non_printables)) class ObsoleteHeaderDefect(HeaderDefect): """Header uses syntax declared obsolete by RFC 5322""" class NonASCIILocalPartDefect(HeaderDefect): """local_part contains non-ASCII characters""" # This defect only occurs during unicode parsing, not when # parsing messages decoded from binary.
Src/StdLib/Lib/email/errors.py
3,535
Couldn't find terminating boundary. An illegal charset was given. A start boundary was found, but not the corresponding close boundary. A message had a continuation line as its first header line. Base class for a header defect. A header that must have a value had none Error while parsing headers. base64 encoded sequence had characters not in base64 alphabet base64 encoded sequence had an incorrect length Header is not valid, message gives details. An invalid content transfer encoding was set on the multipart itself. Base class for a message defect. Base class for errors in the email package. Base class for message parsing errors. A 'Unix-from' header was found in the middle of a header block. Found line with no leading whitespace and no colon before blank line. Conversion to a multipart is prohibited. A message claimed to be a multipart but no subparts were found. A message claimed to be a multipart but had no boundary parameter. local_part contains non-ASCII characters ASCII characters outside the ascii-printable range found Header uses syntax declared obsolete by RFC 5322 The claimed start boundary was never found. Header contained bytes that could not be decoded email package exception classes. Copyright (C) 2001-2006 Python Software Foundation Author: Barry Warsaw Contact: email-sig@python.org These are parsing defects which the parser was able to work around. XXX: backward compatibility, just in case (it was never emitted). These errors are specific to header parsing. This defect only occurs during unicode parsing, not when parsing messages decoded from binary.
1,594
en
0.932007
from unittest.mock import ANY, AsyncMock, MagicMock, create_autospec, patch import aioredis import pytest from tests.utils import Keys from aiocache.backends.redis import RedisBackend, RedisCache, conn from aiocache.base import BaseCache from aiocache.serializers import JsonSerializer pytest.skip("aioredis code is broken", allow_module_level=True) @pytest.fixture # type: ignore[unreachable] def redis_connection(): return create_autospec(aioredis.RedisConnection) @pytest.fixture def redis_pool(redis_connection): class FakePool: def __await__(self): yield return redis_connection pool = FakePool() pool._conn = redis_connection pool.release = AsyncMock() pool.clear = AsyncMock() pool.acquire = AsyncMock(return_value=redis_connection) pool.__call__ = MagicMock(return_value=pool) return pool @pytest.fixture def redis(redis_pool): redis = RedisBackend() redis._pool = redis_pool yield redis @pytest.fixture def create_pool(): with patch("aiocache.backends.redis.aioredis.create_pool") as create_pool: yield create_pool @pytest.fixture(autouse=True) def mock_redis_v1(mocker, redis_connection): mocker.patch("aiocache.backends.redis.aioredis.Redis", return_value=redis_connection) class TestRedisBackend: def test_setup(self): redis_backend = RedisBackend() assert redis_backend.endpoint == "127.0.0.1" assert redis_backend.port == 6379 assert redis_backend.db == 0 assert redis_backend.password is None assert redis_backend.pool_min_size == 1 assert redis_backend.pool_max_size == 10 def test_setup_override(self): redis_backend = RedisBackend(db=2, password="pass") assert redis_backend.endpoint == "127.0.0.1" assert redis_backend.port == 6379 assert redis_backend.db == 2 assert redis_backend.password == "pass" def test_setup_casts(self): redis_backend = RedisBackend( db="2", port="6379", pool_min_size="1", pool_max_size="10", create_connection_timeout="1.5", ) assert redis_backend.db == 2 assert redis_backend.port == 6379 assert redis_backend.pool_min_size == 1 assert 
redis_backend.pool_max_size == 10 assert redis_backend.create_connection_timeout == 1.5 @pytest.mark.asyncio async def test_acquire_conn(self, redis, redis_connection): assert await redis.acquire_conn() == redis_connection @pytest.mark.asyncio async def test_release_conn(self, redis): conn = await redis.acquire_conn() await redis.release_conn(conn) redis._pool.release.assert_called_with(conn) @pytest.mark.asyncio async def test_get_pool_sets_pool(self, redis, redis_pool, create_pool): redis._pool = None await redis._get_pool() assert redis._pool == create_pool.return_value @pytest.mark.asyncio async def test_get_pool_reuses_existing_pool(self, redis): redis._pool = "pool" await redis._get_pool() assert redis._pool == "pool" @pytest.mark.asyncio async def test_get_pool_locked(self, mocker, redis, create_pool): redis._pool = None mocker.spy(redis._pool_lock, "acquire") mocker.spy(redis._pool_lock, "release") assert await redis._get_pool() == create_pool.return_value assert redis._pool_lock.acquire.call_count == 1 assert redis._pool_lock.release.call_count == 1 @pytest.mark.asyncio async def test_get_pool_calls_create_pool(self, redis, create_pool): redis._pool = None await redis._get_pool() create_pool.assert_called_with( (redis.endpoint, redis.port), db=redis.db, password=redis.password, loop=redis._loop, encoding="utf-8", minsize=redis.pool_min_size, maxsize=redis.pool_max_size, create_connection_timeout=redis.create_connection_timeout, ) @pytest.mark.asyncio async def test_get(self, redis, redis_connection): await redis._get(Keys.KEY) redis_connection.get.assert_called_with(Keys.KEY, encoding="utf-8") @pytest.mark.asyncio async def test_gets(self, mocker, redis, redis_connection): mocker.spy(redis, "_get") await redis._gets(Keys.KEY) redis._get.assert_called_with(Keys.KEY, encoding="utf-8", _conn=ANY) @pytest.mark.asyncio async def test_set(self, redis, redis_connection): await redis._set(Keys.KEY, "value") redis_connection.set.assert_called_with(Keys.KEY, 
"value") await redis._set(Keys.KEY, "value", ttl=1) redis_connection.setex.assert_called_with(Keys.KEY, 1, "value") @pytest.mark.asyncio async def test_set_cas_token(self, mocker, redis, redis_connection): mocker.spy(redis, "_cas") await redis._set(Keys.KEY, "value", _cas_token="old_value", _conn=redis_connection) redis._cas.assert_called_with( Keys.KEY, "value", "old_value", ttl=None, _conn=redis_connection ) @pytest.mark.asyncio async def test_cas(self, mocker, redis, redis_connection): mocker.spy(redis, "_raw") await redis._cas(Keys.KEY, "value", "old_value", ttl=10, _conn=redis_connection) redis._raw.assert_called_with( "eval", redis.CAS_SCRIPT, [Keys.KEY], ["value", "old_value", "EX", 10], _conn=redis_connection, ) @pytest.mark.asyncio async def test_cas_float_ttl(self, mocker, redis, redis_connection): mocker.spy(redis, "_raw") await redis._cas(Keys.KEY, "value", "old_value", ttl=0.1, _conn=redis_connection) redis._raw.assert_called_with( "eval", redis.CAS_SCRIPT, [Keys.KEY], ["value", "old_value", "PX", 100], _conn=redis_connection, ) @pytest.mark.asyncio async def test_multi_get(self, redis, redis_connection): await redis._multi_get([Keys.KEY, Keys.KEY_1]) redis_connection.mget.assert_called_with(Keys.KEY, Keys.KEY_1, encoding="utf-8") @pytest.mark.asyncio async def test_multi_set(self, redis, redis_connection): await redis._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")]) redis_connection.mset.assert_called_with(Keys.KEY, "value", Keys.KEY_1, "random") @pytest.mark.asyncio async def test_multi_set_with_ttl(self, redis, redis_connection): await redis._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")], ttl=1) assert redis_connection.multi_exec.call_count == 1 redis_connection.mset.assert_called_with(Keys.KEY, "value", Keys.KEY_1, "random") redis_connection.expire.assert_any_call(Keys.KEY, timeout=1) redis_connection.expire.assert_any_call(Keys.KEY_1, timeout=1) assert redis_connection.execute.call_count == 1 @pytest.mark.asyncio async def 
test_add(self, redis, redis_connection): await redis._add(Keys.KEY, "value") redis_connection.set.assert_called_with(Keys.KEY, "value", exist=ANY, expire=None) await redis._add(Keys.KEY, "value", 1) redis_connection.set.assert_called_with(Keys.KEY, "value", exist=ANY, expire=1) @pytest.mark.asyncio async def test_add_existing(self, redis, redis_connection): redis_connection.set.return_value = False with pytest.raises(ValueError): await redis._add(Keys.KEY, "value") @pytest.mark.asyncio async def test_add_float_ttl(self, redis, redis_connection): await redis._add(Keys.KEY, "value", 0.1) redis_connection.set.assert_called_with(Keys.KEY, "value", exist=ANY, pexpire=100) @pytest.mark.asyncio async def test_exists(self, redis, redis_connection): redis_connection.exists.return_value = 1 await redis._exists(Keys.KEY) redis_connection.exists.assert_called_with(Keys.KEY) @pytest.mark.asyncio async def test_expire(self, redis, redis_connection): await redis._expire(Keys.KEY, ttl=1) redis_connection.expire.assert_called_with(Keys.KEY, 1) @pytest.mark.asyncio async def test_increment(self, redis, redis_connection): await redis._increment(Keys.KEY, delta=2) redis_connection.incrby.assert_called_with(Keys.KEY, 2) @pytest.mark.asyncio async def test_increment_typerror(self, redis, redis_connection): redis_connection.incrby.side_effect = aioredis.errors.ReplyError("msg") with pytest.raises(TypeError): await redis._increment(Keys.KEY, 2) @pytest.mark.asyncio async def test_expire_0_ttl(self, redis, redis_connection): await redis._expire(Keys.KEY, ttl=0) redis_connection.persist.assert_called_with(Keys.KEY) @pytest.mark.asyncio async def test_delete(self, redis, redis_connection): await redis._delete(Keys.KEY) redis_connection.delete.assert_called_with(Keys.KEY) @pytest.mark.asyncio async def test_clear(self, redis, redis_connection): redis_connection.keys.return_value = ["nm:a", "nm:b"] await redis._clear("nm") redis_connection.delete.assert_called_with("nm:a", "nm:b") 
@pytest.mark.asyncio async def test_clear_no_keys(self, redis, redis_connection): redis_connection.keys.return_value = [] await redis._clear("nm") redis_connection.delete.assert_not_called() @pytest.mark.asyncio async def test_clear_no_namespace(self, redis, redis_connection): await redis._clear() assert redis_connection.flushdb.call_count == 1 @pytest.mark.asyncio async def test_raw(self, redis, redis_connection): await redis._raw("get", Keys.KEY) await redis._raw("set", Keys.KEY, 1) redis_connection.get.assert_called_with(Keys.KEY, encoding=ANY) redis_connection.set.assert_called_with(Keys.KEY, 1) @pytest.mark.asyncio async def test_redlock_release(self, mocker, redis): mocker.spy(redis, "_raw") await redis._redlock_release(Keys.KEY, "random") redis._raw.assert_called_with("eval", redis.RELEASE_SCRIPT, [Keys.KEY], ["random"]) @pytest.mark.asyncio async def test_close_when_connected(self, redis): await redis._raw("set", Keys.KEY, 1) await redis._close() assert redis._pool.clear.call_count == 1 @pytest.mark.asyncio async def test_close_when_not_connected(self, redis, redis_pool): redis._pool = None await redis._close() assert redis_pool.clear.call_count == 0 class TestConn: async def dummy(self, *args, _conn=None, **kwargs): pass @pytest.mark.asyncio async def test_conn(self, redis, redis_connection, mocker): mocker.spy(self, "dummy") d = conn(self.dummy) await d(redis, "a", _conn=None) self.dummy.assert_called_with(redis, "a", _conn=redis_connection) @pytest.mark.asyncio async def test_conn_reuses(self, redis, redis_connection, mocker): mocker.spy(self, "dummy") d = conn(self.dummy) await d(redis, "a", _conn=redis_connection) self.dummy.assert_called_with(redis, "a", _conn=redis_connection) await d(redis, "a", _conn=redis_connection) self.dummy.assert_called_with(redis, "a", _conn=redis_connection) class TestRedisCache: @pytest.fixture def set_test_namespace(self, redis_cache): redis_cache.namespace = "test" yield redis_cache.namespace = None def test_name(self): 
assert RedisCache.NAME == "redis" def test_inheritance(self): assert isinstance(RedisCache(), BaseCache) def test_default_serializer(self): assert isinstance(RedisCache().serializer, JsonSerializer) @pytest.mark.parametrize( "path,expected", [("", {}), ("/", {}), ("/1", {"db": "1"}), ("/1/2/3", {"db": "1"})] ) def test_parse_uri_path(self, path, expected): assert RedisCache().parse_uri_path(path) == expected @pytest.mark.parametrize( "namespace, expected", ([None, "test:" + Keys.KEY], ["", Keys.KEY], ["my_ns", "my_ns:" + Keys.KEY]), ) def test_build_key_double_dot(self, set_test_namespace, redis_cache, namespace, expected): assert redis_cache.build_key(Keys.KEY, namespace=namespace) == expected def test_build_key_no_namespace(self, redis_cache): assert redis_cache.build_key(Keys.KEY, namespace=None) == Keys.KEY
tests/ut/backends/test_redis.py
12,662
type: ignore[unreachable]
25
en
0.245221
from django.db.models import Max from datahub.company.models import Company as DBCompany, CompanyPermission from datahub.core.query_utils import get_aggregate_subquery from datahub.search.apps import SearchApp from datahub.search.company.models import Company class CompanySearchApp(SearchApp): """SearchApp for company.""" name = 'company' es_model = Company view_permissions = (f'company.{CompanyPermission.view_company}',) export_permission = f'company.{CompanyPermission.export_company}' queryset = DBCompany.objects.select_related( 'archived_by', 'business_type', 'employee_range', 'export_experience_category', 'headquarter_type', 'one_list_account_owner', 'global_headquarters__one_list_account_owner', 'global_headquarters', 'address_country', 'registered_address_country', 'sector', 'sector__parent', 'sector__parent__parent', 'turnover_range', 'uk_region', ).prefetch_related( 'export_countries__country', ).annotate( latest_interaction_date=get_aggregate_subquery( DBCompany, Max('interactions__date'), ), )
datahub/search/company/apps.py
1,227
SearchApp for company.
22
en
0.950199
import Gramatica.Gramatica as g import graphviz import sys import threading import Errores.Nodo_Error as error import Errores.ListaErrores as lista_err from tkinter import * from tkinter import filedialog from tkinter import font from tkinter import ttk #------------------------------------ Interfaz ---------------------------------------------------------- root = Tk() root.title('TytusDB - Team 19') root.geometry("1000x750") def ejecutar(): reporteg=[] errores=lista_err.ListaErrores() entrada = my_text.get("1.0",END) SQLparser = g.parse(entrada, errores); print(SQLparser); Output.delete(1.0,"end") Output.insert("1.0","Salida"); def open_File(): text_file = filedialog.askopenfilename(initialdir="C:/gui/", title="Text File", filetypes=(("Text Files", "*.txt"), )) text_file = open(text_file, 'r') stuff = text_file.read() my_text.insert(END, stuff) text_file.close() def get_line_numbers(): output = '' if show_line_number.get(): row, col = my_text.index("end").split('.') for i in range(1, int(row)): output += str(i) + '\n' return output def on_content_changed(event=None): update_line_numbers() def update_line_numbers(event=None): line_numbers = get_line_numbers() line_number_bar.config(state='normal') line_number_bar.delete('1.0', 'end') line_number_bar.insert('1.0', line_numbers) line_number_bar.config(state='disabled') menu_bar = Menu(root) file_menu = Menu(menu_bar, tearoff=0) file_menu.add_command(label='Open', compound='left', underline=0, command=open_File) file_menu.add_command(label='Ejecutar', compound='left', underline=0, command=ejecutar) menu_bar.add_cascade(label='File', menu=file_menu) reportes_menu = Menu(menu_bar, tearoff=0) reportes_menu.add_command(label='Errores', compound='left', underline=0) reportes_menu.add_separator() reportes_menu.add_command(label='Gramaticas',compound='left', underline=0) reportes_menu.add_separator() reportes_menu.add_command(label='AST', compound='left', underline=0) reportes_menu.add_separator() 
reportes_menu.add_command(label='TS',compound='left', underline=0) menu_bar.add_cascade(label='Reportes', menu=reportes_menu) show_line_number=IntVar() show_line_number.set(1) root.config(menu=menu_bar) my_frame = Frame(root) my_frame.pack(pady=10) text_scroll = Scrollbar(my_frame) text_scroll.pack(side=RIGHT, fill=Y) line_number_bar = Text(my_frame, width=4, padx=3, takefocus=0, fg='white', border=0, background='#282828',state='disabled', wrap='none') line_number_bar.pack(side='left', fill='y') my_text = Text(my_frame, width=110, height=30, selectforeground="black", yscrollcommand=text_scroll.set) text_scroll.config(command=my_text.yview) separator = ttk.Separator(root, orient='horizontal') separator.place(relx=0, rely=0.47, relwidth=1, relheight=1) Output = Text(root, height = 10,width = 115,bg = "light cyan") my_text.bind('<Any-KeyPress>', on_content_changed) entrada = my_text.get("1.0",END) my_text.pack() separator.pack() Output.pack() root.mainloop()
parser/team19/BDTytus/main.py
3,070
------------------------------------ Interfaz ----------------------------------------------------------
104
en
0.11083
import random # averaging the embeddings between 2 words # return the averaged embeddings def average_two_embeddings_vectors(a, b): avg_embeddings = [] i = 0 for embed in a: z = (embed + b[i]) / 2.0 avg_embeddings.append(z) i += 1 return avg_embeddings # helper func; updates tokens and embeddings with the new combined tokens and averaged embeddings # return the updated tokens string and embeddings vector def update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings): # update tokens if embed2_index > index: tokens[index] = tokens[index] + " " + tokens[embed2_index] else: tokens[index] = tokens[embed2_index] + " " + tokens[index] # update embeddings embeddings[index] = averaged_embeddings # delete old tokens and embeddings del tokens[embed2_index] del embeddings[embed2_index] return tokens, embeddings # helper func def preprocessing_helper(tokens, embeddings, e, combine_with): index = 0 avg_embeddings = [] index = tokens.index(e) first, last = False, False if (index - 1) == -1: first = True if (index + 1) == len(tokens): last = True embed1 = embeddings[index] embed2 = [] embed2_index = 0 # the words following these type of words usually have some relation syntactically and semantically if combine_with == "after": if last: # check if element is the last element return tokens, embeddings embed2_index = index + 1 embed2 = embeddings[embed2_index] else: # the words before if first: # check if first element return tokens, embeddings embed2_index = index - 1 embed2 = embeddings[embed2_index] averaged_embeddings = average_two_embeddings_vectors(embed1, embed2) return update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings) # common tokens that might fit well with other tokens based on syntactic rules of english # therefore, standardize with these before running the default algorithm # return updated tokens and embeddings def syntactic_rules_for_preprocessing(tokens, embeddings, std_length): # not comprehensive but a start. 
combined_after_set = {"a", "an", "the", "some", "each", "all", "to", "for", "in", "on", "of", "about", "with", "from", "at", "have", "has", "is", "are", "was", "were", "be", "been", "being", "should", "would", "will", "do", "don't", "did", "no", "not", "my", "his", "her", "your", "their", "our", "its", "whose", "go", "going", "went", "come", "came", "coming"} combined_before_set = {"him", "her", "them", "us", ",", ".", "!", "?", "...", ";", "-", "~"} if len(tokens) > std_length: for e in tokens: # average embeddings with the token that follows the current token if e in combined_after_set: tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "after") if len(tokens) == std_length: break continue # avg embedding with the token that precedes the current token elif e in combined_before_set: tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "before") if len(tokens) == std_length: break continue return tokens, embeddings # takes in tokens list and corresponding embeddings # shortens the list until the specified length(default 10) # shortens by averaging the embedding vectors and combining the corresponding tokens # combined tokens separated by a space even if it's punctuation. e.g. 'end' + '.' -> "end ." # returns the standardized tokens and embeddings lists # implementation: averaging some words that might go together first (e.g. 
"the cat", "to her") # then, just randomly select tokens and their adjacent token and average those embedding vectors def standardize_by_averaging(tokens, embeddings, std_length=10): flag = True # so as to not change the original lists tokens = tokens.copy() embeddings = embeddings.copy() while len(tokens) > std_length: # attempt to standardize with some regards to syntactical knowledge first if flag: flag = False tokens, embeddings = syntactic_rules_for_preprocessing(tokens, embeddings, std_length) continue length = len(tokens) index = random.randint(1, length - 1) # uses randomizer so to vary the averaging place embed1 = embeddings[index] embed2 = embeddings[index - 1] averaged_embeddings = average_two_embeddings_vectors(embed1, embed2) token, embeddings = update_tok_and_embed(tokens, embeddings, index, index - 1, averaged_embeddings) return tokens, embeddings def standardize_by_duplicating(tokens, embeddings, std_length=10): token_copy, embeddings_copy = tokens[:], embeddings[:] while len(tokens) < std_length: # duplicate the whole message once tokens += token_copy embeddings += embeddings_copy return standardize_by_averaging(tokens, embeddings, std_length) def main(): # fill long_tokens = ["this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings", "long", "and", "that", "there", "are", "punctuations", ".", "this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings", "long", "and", "that", "there", "are", "punctuations", "."] long_tokens2 = [".", ".", "gonna", "be", "a", "long", "in", "order", "for", "the", "testing", "of", "the", "code", ".", "there", "will", "be", "some", "weird", "tokens", "hello", "this", "spellings", "to", "see", "how", "that's", "this", "will", "be", "the"] long_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3], [9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1], [1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 
2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3], [9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1]] # for testing purposes print("test standardize_by_averaging") print("before; tokens:\n", long_tokens) # before standardizing print("before; embeddings:\n", long_embeddings, "\n\n") tokens, embeddings = standardize_by_averaging(long_tokens, long_embeddings) print("after; tokens:\n", tokens) # after standardizing print("after; embeddings:\n", embeddings, "\n\n") # test standardize_by_averaging #2, uses the same embeddings as test #1 print("test standardize_by_averaging#2") print("before; tokens:\n", long_tokens2) # before standardizing print("before; embeddings:\n", long_embeddings, "\n\n") tokens, embeddings = standardize_by_averaging(long_tokens2, long_embeddings) print("after; tokens:\n", tokens) # after standardizing print("after; embeddings:\n", embeddings, "\n\n") # standardize by duplicating short_tokens = ["This", "is", "looking", "Bullish"] short_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3]] # for testing purposes print("test standardize_by_duplicating") print("before; tokens:\n", short_tokens) # before standardizing print("before embeddings:\n", short_embeddings, "\n\n") tokens, embeddings = standardize_by_duplicating(short_tokens, short_embeddings) print("after; tokens:\n", tokens) # after standardizing print("after; embeddings:\n", embeddings, "\n\n") return if __name__ == "__main__": # execute only if run as a script main()
support/standardize.py
7,923
averaging the embeddings between 2 words return the averaged embeddings helper func; updates tokens and embeddings with the new combined tokens and averaged embeddings return the updated tokens string and embeddings vector update tokens update embeddings delete old tokens and embeddings helper func the words following these type of words usually have some relation syntactically and semantically check if element is the last element the words before check if first element common tokens that might fit well with other tokens based on syntactic rules of english therefore, standardize with these before running the default algorithm return updated tokens and embeddings not comprehensive but a start. average embeddings with the token that follows the current token avg embedding with the token that precedes the current token takes in tokens list and corresponding embeddings shortens the list until the specified length(default 10) shortens by averaging the embedding vectors and combining the corresponding tokens combined tokens separated by a space even if it's punctuation. e.g. 'end' + '.' -> "end ." returns the standardized tokens and embeddings lists implementation: averaging some words that might go together first (e.g. "the cat", "to her") then, just randomly select tokens and their adjacent token and average those embedding vectors so as to not change the original lists attempt to standardize with some regards to syntactical knowledge first uses randomizer so to vary the averaging place duplicate the whole message once fill for testing purposes before standardizing after standardizing test standardize_by_averaging 2, uses the same embeddings as test 1 before standardizing after standardizing standardize by duplicating for testing purposes before standardizing after standardizing execute only if run as a script
1,837
en
0.817664
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .resource import Resource class VirtualMachine(Resource): """Describes a Virtual Machine. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Resource Id :vartype id: str :ivar name: Resource name :vartype name: str :ivar type: Resource type :vartype type: str :param location: Required. Resource location :type location: str :param tags: Resource tags :type tags: dict[str, str] :param plan: Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. :type plan: ~azure.mgmt.compute.v2017_03_30.models.Plan :param hardware_profile: Specifies the hardware settings for the virtual machine. :type hardware_profile: ~azure.mgmt.compute.v2017_03_30.models.HardwareProfile :param storage_profile: Specifies the storage settings for the virtual machine disks. :type storage_profile: ~azure.mgmt.compute.v2017_03_30.models.StorageProfile :param os_profile: Specifies the operating system settings for the virtual machine. 
:type os_profile: ~azure.mgmt.compute.v2017_03_30.models.OSProfile :param network_profile: Specifies the network interfaces of the virtual machine. :type network_profile: ~azure.mgmt.compute.v2017_03_30.models.NetworkProfile :param diagnostics_profile: Specifies the boot diagnostic settings state. <br><br>Minimum api-version: 2015-06-15. :type diagnostics_profile: ~azure.mgmt.compute.v2017_03_30.models.DiagnosticsProfile :param availability_set: Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintainance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. :type availability_set: ~azure.mgmt.compute.v2017_03_30.models.SubResource :ivar provisioning_state: The provisioning state, which only appears in the response. :vartype provisioning_state: str :ivar instance_view: The virtual machine instance view. :vartype instance_view: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineInstanceView :param license_type: Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system. 
<br><br> Possible values are: <br><br> Windows_Client <br><br> Windows_Server <br><br> If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. <br><br> For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Minimum api-version: 2015-06-15 :type license_type: str :ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. :vartype vm_id: str :ivar resources: The virtual machine child extension resources. :vartype resources: list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension] :param identity: The identity of the virtual machine, if configured. :type identity: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineIdentity :param zones: The virtual machine zones. 
:type zones: list[str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, 'provisioning_state': {'readonly': True}, 'instance_view': {'readonly': True}, 'vm_id': {'readonly': True}, 'resources': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'plan': {'key': 'plan', 'type': 'Plan'}, 'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'}, 'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'}, 'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'}, 'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'}, 'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'}, 'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'}, 'license_type': {'key': 'properties.licenseType', 'type': 'str'}, 'vm_id': {'key': 'properties.vmId', 'type': 'str'}, 'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'}, 'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'}, 'zones': {'key': 'zones', 'type': '[str]'}, } def __init__(self, *, location: str, tags=None, plan=None, hardware_profile=None, storage_profile=None, os_profile=None, network_profile=None, diagnostics_profile=None, availability_set=None, license_type: str=None, identity=None, zones=None, **kwargs) -> None: super(VirtualMachine, self).__init__(location=location, tags=tags, **kwargs) self.plan = plan self.hardware_profile = hardware_profile self.storage_profile = storage_profile self.os_profile = os_profile 
self.network_profile = network_profile self.diagnostics_profile = diagnostics_profile self.availability_set = availability_set self.provisioning_state = None self.instance_view = None self.license_type = license_type self.vm_id = None self.resources = None self.identity = identity self.zones = zones
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_py3.py
7,800
Describes a Virtual Machine. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Resource Id :vartype id: str :ivar name: Resource name :vartype name: str :ivar type: Resource type :vartype type: str :param location: Required. Resource location :type location: str :param tags: Resource tags :type tags: dict[str, str] :param plan: Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. :type plan: ~azure.mgmt.compute.v2017_03_30.models.Plan :param hardware_profile: Specifies the hardware settings for the virtual machine. :type hardware_profile: ~azure.mgmt.compute.v2017_03_30.models.HardwareProfile :param storage_profile: Specifies the storage settings for the virtual machine disks. :type storage_profile: ~azure.mgmt.compute.v2017_03_30.models.StorageProfile :param os_profile: Specifies the operating system settings for the virtual machine. :type os_profile: ~azure.mgmt.compute.v2017_03_30.models.OSProfile :param network_profile: Specifies the network interfaces of the virtual machine. :type network_profile: ~azure.mgmt.compute.v2017_03_30.models.NetworkProfile :param diagnostics_profile: Specifies the boot diagnostic settings state. <br><br>Minimum api-version: 2015-06-15. :type diagnostics_profile: ~azure.mgmt.compute.v2017_03_30.models.DiagnosticsProfile :param availability_set: Specifies information about the availability set that the virtual machine should be assigned to. 
Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintainance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. :type availability_set: ~azure.mgmt.compute.v2017_03_30.models.SubResource :ivar provisioning_state: The provisioning state, which only appears in the response. :vartype provisioning_state: str :ivar instance_view: The virtual machine instance view. :vartype instance_view: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineInstanceView :param license_type: Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system. <br><br> Possible values are: <br><br> Windows_Client <br><br> Windows_Server <br><br> If this element is included in a request for an update, the value must match the initial value. This value cannot be updated. <br><br> For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Minimum api-version: 2015-06-15 :type license_type: str :ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. 
:vartype vm_id: str :ivar resources: The virtual machine child extension resources. :vartype resources: list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension] :param identity: The identity of the virtual machine, if configured. :type identity: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineIdentity :param zones: The virtual machine zones. :type zones: list[str] coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. --------------------------------------------------------------------------
4,747
en
0.626091
# a = 15*15 + 14*14 + 13*13 + 12*12 + 11*11 # print(a) # a = 3**33333333 % 100 # b = 7**77777777 % 10 # print(a * b) # x = 6 # y = 4 # z = 0 # for i in range(1,14): # z += x*y # y += 1 # x += # print(x, y, z) # print(z) # idx = 0 # a = [5,10,20,50,100] # for i in a: ax = 6 a = 6 b = 4 while b <= 210: c = a*b print(a,b,c) a b += 1
Code Politan/KSI POSI INFORMATIKA.py
403
a = 15*15 + 14*14 + 13*13 + 12*12 + 11*11 print(a) a = 3**33333333 % 100 b = 7**77777777 % 10 print(a * b) x = 6 y = 4 z = 0 for i in range(1,14): z += x*y y += 1 x += print(x, y, z) print(z) idx = 0 a = [5,10,20,50,100] for i in a:
249
en
0.181624
import dj_database_url SECRET_KEY = 'django-migration-docs' # Install the tests as an app so that we can make test models INSTALLED_APPS = ['migration_docs', 'migration_docs.tests'] # Database url comes from the DATABASE_URL env var DATABASES = {'default': dj_database_url.config()}
settings.py
285
Install the tests as an app so that we can make test models Database url comes from the DATABASE_URL env var
108
en
0.872511
# -*- coding: utf-8 -*- # Copyright (C) 2018 by # Marta Grobelna <marta.grobelna@rwth-aachen.de> # Petre Petrov <petrepp4@gmail.com> # Rudi Floren <rudi.floren@gmail.com> # Tobias Winkler <tobias.winkler1@rwth-aachen.de> # All rights reserved. # BSD license. # # Authors: Marta Grobelna <marta.grobelna@rwth-aachen.de> # Petre Petrov <petrepp4@gmail.com> # Rudi Floren <rudi.floren@gmail.com> # Tobias Winkler <tobias.winkler1@rwth-aachen.de> import random as rnd from collections import deque import networkx as nx from planar_graph_sampler.combinatorial_classes.half_edge_graph import HalfEdgeGraph class IrreducibleDissection(HalfEdgeGraph): """ Represents the class 'I' of irreducible dissections from the paper. It is however also used for rooted and derived dissections (sizes are incorrect then). Parameters ---------- half_edge: ClosureHalfEdge A half-edge on the hexagonal boundary of a closed binary tree. """ def __init__(self, half_edge): assert half_edge.is_hexagonal if half_edge.color is not 'black': half_edge = half_edge.opposite.next assert half_edge.color is 'black' super(IrreducibleDissection, self).__init__(half_edge) @property def is_consistent(self): super_ok = super(IrreducibleDissection, self).is_consistent root_is_black = self.half_edge.color is 'black' root_is_hex = self.half_edge.is_hexagonal twelve_hex_he = len([he for he in self.half_edge.get_all_half_edges() if he.is_hexagonal]) == 12 return all([super_ok, root_is_black, root_is_hex, twelve_hex_he, self.is_admissible]) @property def hexagonal_edges(self): """Gets the three half-edges on the hexagonal boundary incident to a black node and point in ccw direction.""" first = self.half_edge res = [first] second = first.opposite.next.opposite.next res.append(second) third = second.opposite.next.opposite.next res.append(third) for he in res: assert he.is_hexagonal and he.color is 'black' return res def root_at_random_hexagonal_edge(self): """Selects a random hexagonal half-edge and makes it the root.""" 
self._half_edge = rnd.choice(self.hexagonal_edges) @property def is_admissible_slow(self): """Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex.""" start_node = self.half_edge assert start_node.color is 'black' end_node = self.half_edge.opposite.next.opposite.next.opposite assert end_node.color is 'white' start_node = start_node.node_nr end_node = end_node.node_nr g = self.to_networkx_graph() # There are always 2 path of length 4 (meaning 4 nodes) from start to end (on the hexagon boundary). # If there is one more, then this is a forbidden path! paths = nx.shortest_simple_paths(g, start_node, end_node) path_1 = next(paths) assert len(path_1) == 4 path_2 = next(paths) assert len(path_2) == 4 path_3 = next(paths) return len(path_3) > 4 @property def is_admissible(self): """Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex.""" start_node = self.half_edge assert start_node.color is 'black' end_node = self.half_edge.opposite.next.opposite.next.opposite assert end_node.color is 'white' # Creates the queue for the BFS. queue = deque(list()) # Put the init half edge into the queue. queue.append((self.half_edge, 0, False, set())) while len(queue) != 0: # Pop the _first element from the FIFO queue. top_element = queue.popleft() # Extract the components from the top element. top_half_edge = top_element[0] distance = top_element[1] has_been_inner_edge_included = top_element[2] visited_nodes = top_element[3] # Updated the visited_nodes_set. visited_nodes.add(top_half_edge.node_nr) # Start BFS for the half edges connected with the specific node. incident_half_edges = top_half_edge.incident_half_edges() for walker_half_edge in incident_half_edges: opposite = walker_half_edge.opposite # Skip the vertex if it was already visited. if opposite in visited_nodes: continue # Prepare the new components of the element. 
updated_distance = distance + 1 new_visited_nodes = set() new_visited_nodes.update(visited_nodes) inner_edge_included = has_been_inner_edge_included or (opposite.is_hexagonal is False) # If the distance is smaller than 3 then the element is added into the queue. if updated_distance < 3: queue.append((opposite, updated_distance, inner_edge_included, new_visited_nodes)) else: # If the distance is equal to 3 than we check whether the new vertex is the outer one and # does an inner edge have been included in the path. If both conditions are True, then a path # has been found which means that the dissection is not irreducible. -> Return false. if opposite.node_nr == end_node.node_nr and inner_edge_included: return False # A path has not been found, therefore the dissection is irreducible and we return True. return True # CombinatorialClass interface. @property def u_size(self): """The u-size is the number of inner faces.""" return (self.number_of_half_edges - 6) / 4 @property def l_size(self): """The l-size is the number of black inner vertices.""" node_dict = self.half_edge.node_dict() black_vertices = len([node_nr for node_nr in node_dict if node_dict[node_nr][0].color is 'black']) # There are always 3 hexagonal outer black vertices. return black_vertices - 3 # Networkx related functionality. def to_networkx_graph(self, include_unpaired=None): """Converts to networkx graph, encodes hexagonal nodes with colors.""" from planar_graph_sampler.combinatorial_classes.half_edge_graph import color_scale # Get dict of nodes. nodes = self.half_edge.node_dict() # Include the leaves as well. G = super(IrreducibleDissection, self).to_networkx_graph(include_unpaired=False) for v in G: if any([he.is_hexagonal for he in nodes[v]]): G.nodes[v]['color'] = '#e8f442' else: G.nodes[v]['color'] = '#aaaaaa' if nodes[v][0].color is 'black': # Make black nodes darker. G.nodes[v]['color'] = color_scale(G.nodes[v]['color'], 0.5) return G
planar_graph_sampler/combinatorial_classes/dissection.py
7,132
Represents the class 'I' of irreducible dissections from the paper. It is however also used for rooted and derived dissections (sizes are incorrect then). Parameters ---------- half_edge: ClosureHalfEdge A half-edge on the hexagonal boundary of a closed binary tree. Gets the three half-edges on the hexagonal boundary incident to a black node and point in ccw direction. Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex. Checks if there is a path of length 3 with an inner edge from the root to the opposite outer vertex. The l-size is the number of black inner vertices. Selects a random hexagonal half-edge and makes it the root. Converts to networkx graph, encodes hexagonal nodes with colors. The u-size is the number of inner faces. -*- coding: utf-8 -*- Copyright (C) 2018 by Marta Grobelna <marta.grobelna@rwth-aachen.de> Petre Petrov <petrepp4@gmail.com> Rudi Floren <rudi.floren@gmail.com> Tobias Winkler <tobias.winkler1@rwth-aachen.de> All rights reserved. BSD license. Authors: Marta Grobelna <marta.grobelna@rwth-aachen.de> Petre Petrov <petrepp4@gmail.com> Rudi Floren <rudi.floren@gmail.com> Tobias Winkler <tobias.winkler1@rwth-aachen.de> There are always 2 path of length 4 (meaning 4 nodes) from start to end (on the hexagon boundary). If there is one more, then this is a forbidden path! Creates the queue for the BFS. Put the init half edge into the queue. Pop the _first element from the FIFO queue. Extract the components from the top element. Updated the visited_nodes_set. Start BFS for the half edges connected with the specific node. Skip the vertex if it was already visited. Prepare the new components of the element. If the distance is smaller than 3 then the element is added into the queue. If the distance is equal to 3 than we check whether the new vertex is the outer one and does an inner edge have been included in the path. 
If both conditions are True, then a path has been found which means that the dissection is not irreducible. -> Return false. A path has not been found, therefore the dissection is irreducible and we return True. CombinatorialClass interface. There are always 3 hexagonal outer black vertices. Networkx related functionality. Get dict of nodes. Include the leaves as well. Make black nodes darker.
2,368
en
0.868012
from flask import Flask, render_template, request, flash, url_for from flask_mail import Message, Mail import json from typing import Dict, List from pathlib import Path from forms import ContactForm from development_config import Config """ This file launches the application. """ # init application app = Flask(__name__) # add secretkey, mail and debug configurations app.config.from_object(Config) # attaching mail to the flask app mail = Mail(app) def read_json(json_file: str, debug=False) -> List[Dict]: """ reads the json files, and formats the description that is associated with each of the json dictionaries that are read in. :param json_file: json file to parse from :param debug: if set to true, will print the json dictionaries as they are read in :return: list of all of the json dictionaries """ # parsing json file with open(json_file, "r") as json_desc: # read json file project_list: List[Dict] = json.load(json_desc) # formats the description data which I stored in a json list for project in project_list: project['description'] = " ".join(project['description']) if debug: print(project) return project_list @app.route("/") @app.route("/home") def home_page(): return render_template("home.html", title="home") @app.route("/portfolio") def portfolio(): # json file to parse json_file = "static/json/projects.json" project_list = read_json(json_file) # grouping portfolio into two's project_groups = [[project_list[i*2], project_list[i*2+1]] for i in range(len(project_list) // 2)] # getting the last project project_singles = False if len(project_list) % 2 != 0: project_singles = project_list[-1:] return render_template("portfolio.html", title="portfolio", project_groups=project_groups, project_singles=project_singles) @app.route("/talks") def talks(): # json file to parse json_file = "static/json/talks.json" # parsed json results project_list = read_json(json_file) return render_template("talks.html", project_list=project_list, title="talks") @app.route("/contact", 
methods=['GET', 'POST']) def contact(): # although I am recreating this form object for every call # - it's state seems to persist... form = ContactForm() if request.method == 'POST': if form.validate() is False: flash("All fields are required", "flash") return render_template("contact.html", form=form) else: msg = Message(form.subject.data, sender='jimmy.shaddix2.0@gmail.com', recipients=['jimmy.shaddix2.0@gmail.com']) msg.body = """ From: {} <{}> {} """.format(form.name.data, form.email.data, form.message.data) mail.send(msg) return render_template('contact.html', success=True) elif request.method == 'GET': return render_template("contact.html", form=form, title="email") if __name__ == "__main__": app.run()
app.py
3,292
reads the json files, and formats the description that is associated with each of the json dictionaries that are read in. :param json_file: json file to parse from :param debug: if set to true, will print the json dictionaries as they are read in :return: list of all of the json dictionaries init application add secretkey, mail and debug configurations attaching mail to the flask app parsing json file read json file formats the description data which I stored in a json list json file to parse grouping portfolio into two's getting the last project json file to parse parsed json results although I am recreating this form object for every call - it's state seems to persist...
684
en
0.877038
import numpy as np from caffe2.python import core, workspace from caffe2.python.test_util import TestCase from caffe2.proto import caffe2_pb2 class TestPrependDim(TestCase): def _test_fwd_bwd(self): old_shape = (128, 2, 4) new_shape = (8, 16, 2, 4) X = np.random.rand(*old_shape).astype(np.float32) Y = np.random.rand(*new_shape).astype(np.float32) net = core.Net('net') net.GivenTensorFill([], 'X', shape=old_shape, values=X.flatten()) net.GivenTensorFill([], 'Y', shape=new_shape, values=Y.flatten()) net.PrependDim(['X'], ['X_out'], dim_size=8) net.DotProduct(['X_out', 'Y'], 'Z') net.AddGradientOperators(['Z']) workspace.RunNetOnce(net) X_out = workspace.FetchBlob('X_out') X_grad = workspace.FetchBlob('X_grad') Y_grad = workspace.FetchBlob('Y_grad') # Check the shape of the gradient np.testing.assert_array_equal(X_out.shape, Y.shape) np.testing.assert_array_equal(X_grad.shape, X.shape) np.testing.assert_array_equal(Y_grad.shape, Y.shape) def test_prepend_dim(self): devices = [core.DeviceOption(caffe2_pb2.CPU, 0)] if workspace.NumGpuDevices() > 0: devices.append(core.DeviceOption(workspace.GpuDeviceType, 0)) for device_opt in devices: with core.DeviceScope(device_opt): self._test_fwd_bwd() if __name__ == "__main__": import unittest unittest.main()
venv/Lib/site-packages/caffe2/python/operator_test/prepend_dim_test.py
1,556
Check the shape of the gradient
31
en
0.713439
# coding: utf-8 import attr from ..util.log import sanitize_dictionary @attr.s(slots=True) class BoxRequest: """Represents a Box API request. :param url: The URL being requested. :type url: `unicode` :param method: The HTTP method to use for the request. :type method: `unicode` or None :param headers: HTTP headers to include with the request. :type headers: `dict` or None :param auto_session_renewal: Whether or not the session can be automatically renewed if the request fails. :type auto_session_renewal: `bool` or None :param expect_json_response: Whether or not the API response must be JSON. :type expect_json_response: `bool` or None """ url = attr.ib() method = attr.ib(default='GET') headers = attr.ib(default=attr.Factory(dict)) auto_session_renewal = attr.ib(default=True) expect_json_response = attr.ib(default=True) def __repr__(self): return '<BoxRequest for {self.method} {self.url} with headers {headers}'.format( self=self, headers=sanitize_dictionary(self.headers), )
boxsdk/session/box_request.py
1,229
Represents a Box API request. :param url: The URL being requested. :type url: `unicode` :param method: The HTTP method to use for the request. :type method: `unicode` or None :param headers: HTTP headers to include with the request. :type headers: `dict` or None :param auto_session_renewal: Whether or not the session can be automatically renewed if the request fails. :type auto_session_renewal: `bool` or None :param expect_json_response: Whether or not the API response must be JSON. :type expect_json_response: `bool` or None coding: utf-8
670
en
0.643058
#!/usr/bin/env python """The setup script.""" from setuptools import setup, find_packages with open('README.rst') as readme_file: readme = readme_file.read() setup( author="Faris A Chugthai", author_email='farischugthai@gmail.com', description="Python Boilerplate contains all the boilerplate you need to create a Python package.", entry_points={ 'console_scripts': [ 'fatal_police_shootings=fatal_police_shootings.core:main', ], }, license="MIT license", include_package_data=True, keywords='fatal_police_shootings', name='fatal_police_shootings', packages=find_packages( include=[ 'fatal_police_shootings', 'fatal_police_shootings.*' ]), test_suite='tests', url='https://github.com/farisachugthai/fatal_police_shootings', version='0.1.0', zip_safe=False, )
setup.py
873
The setup script. !/usr/bin/env python
39
en
0.349468
import tensorflow as tf import numpy as np import os from tqdm import tqdm import argparse from utils.utils import create_tfr_files, prob_to_secondary_structure from utils.FastaMLtoSL import FastaMLtoSL import time start = time.time() from argparse import RawTextHelpFormatter parser = argparse.ArgumentParser() parser.add_argument('--inputs', default='sample_inputs/2zzm-B.fasta', type=str, help='Path to input file in fasta format, accept multiple sequences as well in fasta format; default = ''sample_inputs/single_seq.fasta''\n', metavar='') parser.add_argument('--outputs',default='outputs/', type=str, help='Path to output files; SPOT-RNA outputs at least three files .ct, .bpseq, and .prob files; default = ''outputs/\n', metavar='') parser.add_argument('--gpu', default=-1, type=int, help='To run on GPU, specifiy GPU number. If only one GPU in computer specifiy 0; default = -1 (no GPU)\n', metavar='') parser.add_argument('--plots',default=False, type=bool, help='Set this to "True" to get the 2D plots of predicted secondary structure by SPOT-RNA; default = False\n', metavar='') parser.add_argument('--motifs',default=False, type=bool, help='Set this to "True" to get the motifs of predicted secondary structure by SPOT-RNA; default = False\n', metavar='') parser.add_argument('--cpu',default=16, type=int, help='Specify number of cpu threads that SPOT-RNA can use; default = 16\n', metavar='') #parser.add_argument('--NC',default=True, type=bool, help='Set this to "False" to predict only canonical pairs; default = True\n', metavar='') args = parser.parse_args() os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) FastaMLtoSL(args.inputs) base_path = os.path.dirname(os.path.realpath(__file__)) input_file = os.path.basename(args.inputs) create_tfr_files(args.inputs, base_path, input_file) with open(args.inputs) as file: input_data = [line.strip() for line in file.read().splitlines() if line.strip()] count = 
int(len(input_data)/2) ids = [input_data[2*i].replace(">", "") for i in range(count)] sequences = {} for i,I in enumerate(ids): sequences[I] = input_data[2*i+1].replace(" ", "").upper().replace("T", "U") os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu) #os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' NUM_MODELS = 5 test_loc = [os.path.join(base_path, 'input_tfr_files', input_file+'.tfrecords')] outputs = {} mask = {} def sigmoid(x): return 1/(1+np.exp(-np.array(x, dtype=np.float128))) for MODEL in range(NUM_MODELS): if args.gpu==-1: config = tf.ConfigProto(intra_op_parallelism_threads=args.cpu, inter_op_parallelism_threads=args.cpu) else: config = tf.compat.v1.ConfigProto() config.allow_soft_placement=True config.log_device_placement=False print('\nPredicting for SPOT-RNA model '+str(MODEL)) with tf.compat.v1.Session(config=config) as sess: saver = tf.compat.v1.train.import_meta_graph(os.path.join(base_path, 'SPOT-RNA-models', 'model' + str(MODEL) + '.meta')) saver.restore(sess,os.path.join(base_path, 'SPOT-RNA-models', 'model' + str(MODEL))) graph = tf.compat.v1.get_default_graph() init_test = graph.get_operation_by_name('make_initializer_2') tmp_out = graph.get_tensor_by_name('output_FC/fully_connected/BiasAdd:0') name_tensor = graph.get_tensor_by_name('tensors_2/component_0:0') RNA_name = graph.get_tensor_by_name('IteratorGetNext:0') label_mask = graph.get_tensor_by_name('IteratorGetNext:4') sess.run(init_test,feed_dict={name_tensor:test_loc}) pbar = tqdm(total = count) while True: try: out = sess.run([tmp_out,RNA_name,label_mask],feed_dict={'dropout:0':1}) out[1] = out[1].decode() mask[out[1]] = out[2] if MODEL == 0: outputs[out[1]] = [sigmoid(out[0])] else: outputs[out[1]].append(sigmoid(out[0])) #print('RNA name: %s'%(out[1])) pbar.update(1) except: break pbar.close() tf.compat.v1.reset_default_graph() RNA_ids = [i for i in list(outputs.keys())] ensemble_outputs = {} print('\nPost Processing and Saving Output') for i in RNA_ids: ensemble_outputs[i] = 
np.mean(outputs[i],0) prob_to_secondary_structure(ensemble_outputs[i], mask[i], sequences[i], i, args, base_path) print('\nFinished!') end = time.time() print('\nProcesssing Time {} seconds'.format(end - start))
SPOT-RNA.py
4,593
parser.add_argument('--NC',default=True, type=bool, help='Set this to "False" to predict only canonical pairs; default = True\n', metavar='')os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' print('RNA name: %s'%(out[1]))
212
en
0.208669
import os,sys import cytnx as cy class Hising(cy.LinOp): def __init__(self,L,J,Hx): cy.LinOp.__init__(self,"mv_elem",2**L,cy.Type.Double,cy.Device.cpu) ## custom members: self.J = J self.Hx = Hx self.L = L def SzSz(self,i,j,ipt_id): return ipt_id,(1. - 2.*(((ipt_id>>i)&0x1)^((ipt_id>>j)&0x1))) def Sx(self,i,ipt_id): out_id = ipt_id^((0x1)<<i) return out_id,1.0 ## let's overload this with custom operation: def pre_construct(self): for a in range(self.nx()): tmp = [[],[]] for i in range(self.L): oid,amp = self.SzSz(i,(i+1)%self.L,a) if not oid in tmp[0]: tmp[0].append(oid) tmp[1].append(amp*self.J) else: idx = tmp[0].index(oid) tmp[1][idx] += amp*self.J #self.set_elem(oid,a,amp*self.J) oid,amp = self.Sx(i,a) if not oid in tmp[0]: tmp[0].append(oid) tmp[1].append(amp*(-self.Hx)) else: idx = tmp[0].index(oid) tmp[1][idx]+=amp*(-self.Hx) for i in range(len(tmp[0])): self.set_elem(tmp[0][i],a,tmp[1][i]) #def matvec(self,v): # out = cy.zeros(v.shape()[0],v.dtype(),v.device()); # return out L = 4 J = 1 Hx = 0.3 H = Hising(L,J,Hx) H.pre_construct() v = cy.ones(16) print(cy.linalg.Lanczos_ER(H,3))
example/ED/ed_ising_mve.py
1,595
custom members: let's overload this with custom operation:self.set_elem(oid,a,amp*self.J) def matvec(self,v): out = cy.zeros(v.shape()[0],v.dtype(),v.device()); return out
177
en
0.376182
#!/usr/bin/env python """ Translator Class and builder """ from __future__ import print_function import codecs import os import math import torch from tensorboardX import SummaryWriter from others.utils import rouge_results_to_str, test_rouge, tile from translate.beam import GNMTGlobalScorer def build_predictor(args, tokenizer, symbols, model, logger=None): scorer = GNMTGlobalScorer(args.alpha,length_penalty='wu') translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger) return translator class Translator(object): """ Uses a model to translate a batch of sentences. Args: model (:obj:`onmt.modules.NMTModel`): NMT model to use for translation fields (dict of Fields): data fields beam_size (int): size of beam to use n_best (int): number of translations produced max_length (int): maximum length output to produce global_scores (:obj:`GlobalScorer`): object to rescore final translations copy_attn (bool): use copy attention during translation cuda (bool): use cuda beam_trace (bool): trace beam search for debugging logger(logging.Logger): logger. 
""" def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None, dump_beam=""): self.logger = logger self.cuda = args.visible_gpus != '-1' self.args = args self.model = model self.generator = self.model.generator self.vocab = vocab self.symbols = symbols self.start_token = symbols['BOS'] self.end_token = symbols['EOS'] self.global_scorer = global_scorer self.beam_size = args.beam_size self.min_length = args.min_length self.max_length = args.max_length self.dump_beam = dump_beam # for debugging self.beam_trace = self.dump_beam != "" self.beam_accum = None tensorboard_log_dir = args.model_path self.tensorboard_writer = SummaryWriter(tensorboard_log_dir, comment="Unmt") if self.beam_trace: self.beam_accum = { "predicted_ids": [], "beam_parent_ids": [], "scores": [], "log_probs": []} def _build_target_tokens(self, pred): # vocab = self.fields["tgt"].vocab tokens = [] for tok in pred: tok = int(tok) tokens.append(tok) if tokens[-1] == self.end_token: tokens = tokens[:-1] break tokens = [t for t in tokens if t < len(self.vocab)] tokens = self.vocab.DecodeIds(tokens).split(' ') return tokens def from_batch(self, translation_batch): batch = translation_batch["batch"] assert (len(translation_batch["gold_score"]) == len(translation_batch["predictions"])) batch_size = batch.batch_size preds, pred_score, gold_score, tgt_str, src = translation_batch["predictions"],translation_batch["scores"],translation_batch["gold_score"],batch.tgt_str, batch.src translations = [] for b in range(batch_size): pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]]) pred_sents = ' '.join(pred_sents).replace(' ##','') gold_sent = ' '.join(tgt_str[b].split()) # translation = Translation(fname[b],src[:, b] if src is not None else None, # src_raw, pred_sents, # attn[b], pred_score[b], gold_sent, # gold_score[b]) # src = self.spm.DecodeIds([int(t) for t in translation_batch['batch'].src[0][5] if int(t) != len(self.spm)]) raw_src = 
[self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500] raw_src = ' '.join(raw_src) translation = (pred_sents, gold_sent, raw_src) # translation = (pred_sents[0], gold_sent) translations.append(translation) return translations def translate(self, data_iter, step, attn_debug=False): self.model.eval() gold_path = self.args.result_path + '.%d.gold' % step can_path = self.args.result_path + '.%d.candidate' % step self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8') self.can_out_file = codecs.open(can_path, 'w', 'utf-8') # raw_gold_path = self.args.result_path + '.%d.raw_gold' % step # raw_can_path = self.args.result_path + '.%d.raw_candidate' % step self.gold_out_file = codecs.open(gold_path, 'w', 'utf-8') self.can_out_file = codecs.open(can_path, 'w', 'utf-8') raw_src_path = self.args.result_path + '.%d.raw_src' % step self.src_out_file = codecs.open(raw_src_path, 'w', 'utf-8') # pred_results, gold_results = [], [] ct = 0 with torch.no_grad(): for batch in data_iter: if(self.args.recall_eval): gold_tgt_len = batch.tgt.size(1) self.min_length = gold_tgt_len + 20 self.max_length = gold_tgt_len + 60 batch_data = self.translate_batch(batch) translations = self.from_batch(batch_data) for trans in translations: pred, gold, src = trans pred_str = pred.replace('[unused1]', '').replace('[unused4]', '').replace('[PAD]', '').replace('[unused2]', '').replace(r' +', ' ').replace(' [unused3] ', '<q>').replace('[unused3]', '').strip() gold_str = gold.strip() if(self.args.recall_eval): _pred_str = '' gap = 1e3 for sent in pred_str.split('<q>'): can_pred_str = _pred_str+ '<q>'+sent.strip() can_gap = math.fabs(len(_pred_str.split())-len(gold_str.split())) # if(can_gap>=gap): if(len(can_pred_str.split())>=len(gold_str.split())+10): pred_str = _pred_str break else: gap = can_gap _pred_str = can_pred_str # pred_str = ' '.join(pred_str.split()[:len(gold_str.split())]) # self.raw_can_out_file.write(' '.join(pred).strip() + '\n') # self.raw_gold_out_file.write(' '.join(gold).strip() + 
'\n') self.can_out_file.write(pred_str + '\n') self.gold_out_file.write(gold_str + '\n') self.src_out_file.write(src.strip() + '\n') ct += 1 self.can_out_file.flush() self.gold_out_file.flush() self.src_out_file.flush() self.can_out_file.close() self.gold_out_file.close() self.src_out_file.close() if (step != -1): rouges = self._report_rouge(gold_path, can_path) self.logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges))) if self.tensorboard_writer is not None: self.tensorboard_writer.add_scalar('test/rouge1-F', rouges['rouge_1_f_score'], step) self.tensorboard_writer.add_scalar('test/rouge2-F', rouges['rouge_2_f_score'], step) self.tensorboard_writer.add_scalar('test/rougeL-F', rouges['rouge_l_f_score'], step) def _report_rouge(self, gold_path, can_path): self.logger.info("Calculating Rouge") results_dict = test_rouge(self.args.temp_dir, can_path, gold_path) return results_dict def translate_batch(self, batch, fast=False): """ Translate a batch of sentences. Mostly a wrapper around :obj:`Beam`. Args: batch (:obj:`Batch`): a batch from a dataset object data (:obj:`Dataset`): the dataset object fast (bool): enables fast beam search (may not support all features) Todo: Shouldn't need the original dataset. """ with torch.no_grad(): return self._fast_translate_batch( batch, self.max_length, min_length=self.min_length) def _fast_translate_batch(self, batch, max_length, min_length=0): # TODO: faster code path for beam_size == 1. # TODO: support these blacklisted features. assert not self.dump_beam beam_size = self.beam_size batch_size = batch.batch_size src = batch.src segs = batch.segs mask_src = batch.mask_src src_features = self.model.bert(src, segs, mask_src) dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True) device = src_features.device # Tile states and memory beam_size times. 
dec_states.map_batch_fn( lambda state, dim: tile(state, beam_size, dim=dim)) src_features = tile(src_features, beam_size, dim=0) batch_offset = torch.arange( batch_size, dtype=torch.long, device=device) beam_offset = torch.arange( 0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device) alive_seq = torch.full( [batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device) # Give full probability to the first beam on the first step. topk_log_probs = ( torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size)) # Structure that holds finished hypotheses. hypotheses = [[] for _ in range(batch_size)] # noqa: F812 results = {} results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812 results["scores"] = [[] for _ in range(batch_size)] # noqa: F812 results["gold_score"] = [0] * batch_size results["batch"] = batch for step in range(max_length): decoder_input = alive_seq[:, -1].view(1, -1) # Decoder forward. decoder_input = decoder_input.transpose(0,1) dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step) # Generator forward. log_probs = self.generator.forward(dec_out.transpose(0,1).squeeze(0)) vocab_size = log_probs.size(-1) if step < min_length: log_probs[:, self.end_token] = -1e20 # Multiply probs by the beam probability. log_probs += topk_log_probs.view(-1).unsqueeze(1) alpha = self.global_scorer.alpha length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha # Flatten probs into a list of possibilities. 
curr_scores = log_probs / length_penalty if(self.args.block_trigram): cur_len = alive_seq.size(1) if(cur_len>3): for i in range(alive_seq.size(0)): fail = False words = [int(w) for w in alive_seq[i]] words = [self.vocab.ids_to_tokens[w] for w in words] words = ' '.join(words).replace(' ##','').split() if(len(words)<=3): continue trigrams = [(words[i-1],words[i],words[i+1]) for i in range(1,len(words)-1)] trigram = tuple(trigrams[-1]) if trigram in trigrams[:-1]: fail = True if fail: curr_scores[i] = -10e20 curr_scores = curr_scores.reshape(-1, beam_size * vocab_size) topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1) # Recover log probs. topk_log_probs = topk_scores * length_penalty # Resolve beam origin and true word ids. topk_beam_index = topk_ids.div(vocab_size) topk_ids = topk_ids.fmod(vocab_size) # Map beam_index to batch_index in the flat representation. batch_index = ( topk_beam_index + beam_offset[:topk_beam_index.size(0)].unsqueeze(1)) select_indices = batch_index.view(-1) # Append last prediction. alive_seq = torch.cat( [alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1) is_finished = topk_ids.eq(self.end_token) if step + 1 == max_length: is_finished.fill_(1) # End condition is top beam is finished. end_condition = is_finished[:, 0].eq(1) # Save finished hypotheses. if is_finished.any(): predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1)) for i in range(is_finished.size(0)): b = batch_offset[i] if end_condition[i]: is_finished[i].fill_(1) finished_hyp = is_finished[i].nonzero().view(-1) # Store finished hypotheses for this batch. for j in finished_hyp: hypotheses[b].append(( topk_scores[i, j], predictions[i, j, 1:])) # If the batch reached the end, save the n_best hypotheses. 
if end_condition[i]: best_hyp = sorted( hypotheses[b], key=lambda x: x[0], reverse=True) score, pred = best_hyp[0] results["scores"][b].append(score) results["predictions"][b].append(pred) non_finished = end_condition.eq(0).nonzero().view(-1) # If all sentences are translated, no need to go further. if len(non_finished) == 0: break # Remove finished batches for the next step. topk_log_probs = topk_log_probs.index_select(0, non_finished) batch_index = batch_index.index_select(0, non_finished) batch_offset = batch_offset.index_select(0, non_finished) alive_seq = predictions.index_select(0, non_finished) \ .view(-1, alive_seq.size(-1)) # Reorder states. select_indices = batch_index.view(-1) src_features = src_features.index_select(0, select_indices) dec_states.map_batch_fn( lambda state, dim: state.index_select(dim, select_indices)) return results class Translation(object): """ Container for a translated sentence. Attributes: src (`LongTensor`): src word ids src_raw ([str]): raw src words pred_sents ([[str]]): words from the n-best translations pred_scores ([[float]]): log-probs of n-best translations attns ([`FloatTensor`]) : attention dist for each translation gold_sent ([str]): words from gold translation gold_score ([float]): log-prob of gold translation """ def __init__(self, fname, src, src_raw, pred_sents, attn, pred_scores, tgt_sent, gold_score): self.fname = fname self.src = src self.src_raw = src_raw self.pred_sents = pred_sents self.attns = attn self.pred_scores = pred_scores self.gold_sent = tgt_sent self.gold_score = gold_score def log(self, sent_number): """ Log translation. 
""" output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw) best_pred = self.pred_sents[0] best_score = self.pred_scores[0] pred_sent = ' '.join(best_pred) output += 'PRED {}: {}\n'.format(sent_number, pred_sent) output += "PRED SCORE: {:.4f}\n".format(best_score) if self.gold_sent is not None: tgt_sent = ' '.join(self.gold_sent) output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent) output += ("GOLD SCORE: {:.4f}\n".format(self.gold_score)) if len(self.pred_sents) > 1: output += '\nBEST HYP:\n' for score, sent in zip(self.pred_scores, self.pred_sents): output += "[{:.4f}] {}\n".format(score, sent) return output
src/models/predictor.py
17,006
Container for a translated sentence. Attributes: src (`LongTensor`): src word ids src_raw ([str]): raw src words pred_sents ([[str]]): words from the n-best translations pred_scores ([[float]]): log-probs of n-best translations attns ([`FloatTensor`]) : attention dist for each translation gold_sent ([str]): words from gold translation gold_score ([float]): log-prob of gold translation Uses a model to translate a batch of sentences. Args: model (:obj:`onmt.modules.NMTModel`): NMT model to use for translation fields (dict of Fields): data fields beam_size (int): size of beam to use n_best (int): number of translations produced max_length (int): maximum length output to produce global_scores (:obj:`GlobalScorer`): object to rescore final translations copy_attn (bool): use copy attention during translation cuda (bool): use cuda beam_trace (bool): trace beam search for debugging logger(logging.Logger): logger. Log translation. Translate a batch of sentences. Mostly a wrapper around :obj:`Beam`. Args: batch (:obj:`Batch`): a batch from a dataset object data (:obj:`Dataset`): the dataset object fast (bool): enables fast beam search (may not support all features) Todo: Shouldn't need the original dataset. 
Translator Class and builder !/usr/bin/env python for debugging vocab = self.fields["tgt"].vocab translation = Translation(fname[b],src[:, b] if src is not None else None, src_raw, pred_sents, attn[b], pred_score[b], gold_sent, gold_score[b]) src = self.spm.DecodeIds([int(t) for t in translation_batch['batch'].src[0][5] if int(t) != len(self.spm)]) translation = (pred_sents[0], gold_sent) raw_gold_path = self.args.result_path + '.%d.raw_gold' % step raw_can_path = self.args.result_path + '.%d.raw_candidate' % step pred_results, gold_results = [], [] if(can_gap>=gap): pred_str = ' '.join(pred_str.split()[:len(gold_str.split())]) self.raw_can_out_file.write(' '.join(pred).strip() + '\n') self.raw_gold_out_file.write(' '.join(gold).strip() + '\n') TODO: faster code path for beam_size == 1. TODO: support these blacklisted features. Tile states and memory beam_size times. Give full probability to the first beam on the first step. Structure that holds finished hypotheses. noqa: F812 noqa: F812 noqa: F812 Decoder forward. Generator forward. Multiply probs by the beam probability. Flatten probs into a list of possibilities. Recover log probs. Resolve beam origin and true word ids. Map beam_index to batch_index in the flat representation. Append last prediction. End condition is top beam is finished. Save finished hypotheses. Store finished hypotheses for this batch. If the batch reached the end, save the n_best hypotheses. If all sentences are translated, no need to go further. Remove finished batches for the next step. Reorder states.
2,938
en
0.671323
# -*- coding: utf-8 -*- __version__ = "3.0.0.dev0" try: __EMCEE3_SETUP__ except NameError: __EMCEE3_SETUP__ = False if not __EMCEE3_SETUP__: __all__ = [ "moves", "pools", "autocorr", "Model", "SimpleModel", "Sampler", "Ensemble", "State", ] from . import moves, pools, autocorr from .model import Model, SimpleModel from .ensemble import Ensemble from .samplers import Sampler from .state import State
emcee3/__init__.py
506
-*- coding: utf-8 -*-
21
en
0.767281
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorboard.uploader.exporter.""" from unittest import mock import numpy as np import pandas from tensorboard import test as tb_test from tensorboard.data.experimental import experiment_from_dev from tensorboard.uploader import test_util from tensorboard.uploader.proto import export_service_pb2 from tensorboard.util import grpc_util class ExperimentFromDevTest(tb_test.TestCase): def test_get_scalars_works(self): mock_api_client = mock.Mock() def stream_experiment_data(request, **kwargs): self.assertEqual(request.experiment_id, "789") self.assertEqual(kwargs["metadata"], grpc_util.version_metadata()) for run in ("train", "test"): for tag in ("accuracy", "loss"): response = export_service_pb2.StreamExperimentDataResponse() response.run_name = run response.tag_name = tag display_name = "%s:%s" % (request.experiment_id, tag) response.tag_metadata.CopyFrom( test_util.scalar_metadata(display_name) ) for step in range(10): response.points.steps.append(step) if tag == "loss": if run == "train": value = 1.0 / (step + 1) seconds = step else: value = -1.0 / (step + 1) seconds = 600 + step else: # "accuracy" if run == "train": value = 1.0 / (10 - step) seconds = step * 2 else: value = -1.0 / (10 - step) seconds = 600 + step * 2 response.points.values.append(value) 
response.points.wall_times.add(seconds=seconds, nanos=0) yield response mock_api_client.StreamExperimentData = mock.Mock( wraps=stream_experiment_data ) with mock.patch.object( experiment_from_dev, "get_api_client", lambda api_endpoint: mock_api_client, ): experiment = experiment_from_dev.ExperimentFromDev("789") for pivot in (False, True): for include_wall_time in (False, True): with self.subTest( "pivot=%s; include_wall_time=%s" % (pivot, include_wall_time) ): dataframe = experiment.get_scalars( pivot=pivot, include_wall_time=include_wall_time ) if pivot: run_key = ( ("run", "") if include_wall_time else "run" ) step_key = ( ("step", "") if include_wall_time else "step" ) accuracy_value_key = ( ("value", "accuracy") if include_wall_time else "accuracy" ) loss_value_key = ( ("value", "loss") if include_wall_time else "loss" ) data = { run_key: ["test"] * 10 + ["train"] * 10, step_key: np.concatenate( [np.arange(0, 10), np.arange(0, 10)] ), accuracy_value_key: np.concatenate( [ -1.0 / (10.0 - np.arange(0, 10)), 1.0 / (10.0 - np.arange(0, 10)), ], ), loss_value_key: np.concatenate( [ -1.0 / (1.0 + np.arange(0, 10)), 1.0 / (1.0 + np.arange(0, 10)), ], ), } if include_wall_time: data[ ("wall_time", "accuracy") ] = np.concatenate( [ 600.0 + 2.0 * np.arange(0, 10), 2.0 * np.arange(0, 10), ] ) data[("wall_time", "loss")] = np.concatenate( [ 600.0 + np.arange(0, 10), 1.0 * np.arange(0, 10), ] ) expected = pandas.DataFrame(data) else: # No pivot_table. 
data = { "run": ["train"] * 20 + ["test"] * 20, "tag": (["accuracy"] * 10 + ["loss"] * 10) * 2, "step": list(np.arange(0, 10)) * 4, "value": np.concatenate( [ 1.0 / (10.0 - np.arange(0, 10)), 1.0 / (1.0 + np.arange(0, 10)), -1.0 / (10.0 - np.arange(0, 10)), -1.0 / (1.0 + np.arange(0, 10)), ] ), } if include_wall_time: data["wall_time"] = np.concatenate( [ 2.0 * np.arange(0, 10), 1.0 * np.arange(0, 10), 600.0 + 2.0 * np.arange(0, 10), 600.0 + np.arange(0, 10), ] ) expected = pandas.DataFrame(data) pandas.testing.assert_frame_equal( dataframe, expected, check_names=True, ) def test_get_scalars_with_pivot_table_with_missing_value(self): mock_api_client = mock.Mock() def stream_experiment_data(request, **kwargs): self.assertEqual(request.experiment_id, "789") self.assertEqual(kwargs["metadata"], grpc_util.version_metadata()) response = export_service_pb2.StreamExperimentDataResponse() response.run_name = "train" response.tag_name = "batch_loss" response.points.steps.append(0) response.points.values.append(0.5) response.points.wall_times.add(seconds=0, nanos=0) response.points.steps.append(1) response.points.values.append(0.25) response.points.wall_times.add(seconds=1, nanos=0) yield response response = export_service_pb2.StreamExperimentDataResponse() response.run_name = "train" response.tag_name = "epoch_loss" response.points.steps.append(0) response.points.values.append(0.375) response.points.wall_times.add(seconds=2, nanos=0) yield response mock_api_client.StreamExperimentData = mock.Mock( wraps=stream_experiment_data ) with mock.patch.object( experiment_from_dev, "get_api_client", lambda api_endpoint: mock_api_client, ): experiment = experiment_from_dev.ExperimentFromDev("789") with self.assertRaisesRegexp( ValueError, r"contains missing value\(s\).*different sets of " r"steps.*pivot=False", ): experiment.get_scalars(pivot=True) def test_get_scalars_with_actual_inf_and_nan(self): """Test for get_scalars() call that involve inf and nan in user data.""" 
mock_api_client = mock.Mock() def stream_experiment_data(request, **kwargs): self.assertEqual(request.experiment_id, "789") self.assertEqual(kwargs["metadata"], grpc_util.version_metadata()) response = export_service_pb2.StreamExperimentDataResponse() response.run_name = "train" response.tag_name = "batch_loss" response.points.steps.append(0) response.points.values.append(np.nan) response.points.wall_times.add(seconds=0, nanos=0) response.points.steps.append(1) response.points.values.append(np.inf) response.points.wall_times.add(seconds=10, nanos=0) yield response mock_api_client.StreamExperimentData = mock.Mock( wraps=stream_experiment_data ) with mock.patch.object( experiment_from_dev, "get_api_client", lambda api_endpoint: mock_api_client, ): experiment = experiment_from_dev.ExperimentFromDev("789") dataframe = experiment.get_scalars(pivot=True) expected = pandas.DataFrame( { "run": ["train"] * 2, "step": [0, 1], "batch_loss": [np.nan, np.inf], } ) pandas.testing.assert_frame_equal(dataframe, expected, check_names=True) if __name__ == "__main__": tb_test.main()
tensorboard/data/experimental/experiment_from_dev_test.py
11,003
Test for get_scalars() call that involve inf and nan in user data. Tests for tensorboard.uploader.exporter. Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== "accuracy" No pivot_table.
797
en
0.806164
import asyncio import functools import importlib import inspect import logging from typing import Text, Dict, Optional, Any, List, Callable, Collection, Type from rasa.shared.exceptions import RasaException logger = logging.getLogger(__name__) def class_from_module_path( module_path: Text, lookup_path: Optional[Text] = None ) -> Type: """Given the module name and path of a class, tries to retrieve the class. The loaded class can be used to instantiate new objects. Args: module_path: either an absolute path to a Python class, or the name of the class in the local / global scope. lookup_path: a path where to load the class from, if it cannot be found in the local / global scope. Returns: a Python class Raises: ImportError, in case the Python class cannot be found. RasaException, in case the imported result is something other than a class """ klass = None if "." in module_path: module_name, _, class_name = module_path.rpartition(".") m = importlib.import_module(module_name) klass = getattr(m, class_name, None) elif lookup_path: # try to import the class from the lookup path m = importlib.import_module(lookup_path) klass = getattr(m, module_path, None) if klass is None: raise ImportError(f"Cannot retrieve class from path {module_path}.") if not inspect.isclass(klass): raise RasaException( f"`class_from_module_path()` is expected to return a class, " f"but for {module_path} we got a {type(klass)}." ) return klass def all_subclasses(cls: Any) -> List[Any]: """Returns all known (imported) subclasses of a class.""" classes = cls.__subclasses__() + [ g for s in cls.__subclasses__() for g in all_subclasses(s) ] return [subclass for subclass in classes if not inspect.isabstract(subclass)] def module_path_from_instance(inst: Any) -> Text: """Return the module path of an instance's class.""" return inst.__module__ + "." 
+ inst.__class__.__name__ def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]: """Sorts a list of dictionaries by their first key.""" return sorted(dicts, key=lambda d: list(d.keys())[0]) def lazy_property(function: Callable) -> Any: """Allows to avoid recomputing a property over and over. The result gets stored in a local var. Computation of the property will happen once, on the first call of the property. All succeeding calls will use the value stored in the private property.""" attr_name = "_lazy_" + function.__name__ @property def _lazyprop(self: Any) -> Any: if not hasattr(self, attr_name): setattr(self, attr_name, function(self)) return getattr(self, attr_name) return _lazyprop def cached_method(f: Callable[..., Any]) -> Callable[..., Any]: """Caches method calls based on the call's `args` and `kwargs`. Works for `async` and `sync` methods. Don't apply this to functions. Args: f: The decorated method whose return value should be cached. Returns: The return value which the method gives for the first call with the given arguments. """ assert "self" in arguments_of(f), "This decorator can only be used with methods." 
class Cache: """Helper class to abstract the caching details.""" def __init__(self, caching_object: object, args: Any, kwargs: Any) -> None: self.caching_object = caching_object self.cache = getattr(caching_object, self._cache_name(), {}) # noinspection PyUnresolvedReferences self.cache_key = functools._make_key(args, kwargs, typed=False) def _cache_name(self) -> Text: return f"_cached_{self.caching_object.__class__.__name__}_{f.__name__}" def is_cached(self) -> bool: return self.cache_key in self.cache def cache_result(self, result: Any) -> None: self.cache[self.cache_key] = result setattr(self.caching_object, self._cache_name(), self.cache) def cached_result(self) -> Any: return self.cache[self.cache_key] if asyncio.iscoroutinefunction(f): @functools.wraps(f) async def decorated(self: object, *args: Any, **kwargs: Any) -> Any: cache = Cache(self, args, kwargs) if not cache.is_cached(): # Store the task immediately so that other concurrent calls of the # method can re-use the same task and don't schedule a second execution. to_cache = asyncio.ensure_future(f(self, *args, **kwargs)) cache.cache_result(to_cache) return await cache.cached_result() return decorated else: @functools.wraps(f) def decorated(self: object, *args: Any, **kwargs: Any) -> Any: cache = Cache(self, args, kwargs) if not cache.is_cached(): to_cache = f(self, *args, **kwargs) cache.cache_result(to_cache) return cache.cached_result() return decorated def transform_collection_to_sentence(collection: Collection[Text]) -> Text: """Transforms e.g. a list like ['A', 'B', 'C'] into a sentence 'A, B and C'.""" x = list(collection) if len(x) >= 2: return ", ".join(map(str, x[:-1])) + " and " + x[-1] return "".join(collection) def minimal_kwargs( kwargs: Dict[Text, Any], func: Callable, excluded_keys: Optional[List] = None ) -> Dict[Text, Any]: """Returns only the kwargs which are required by a function. Keys, contained in the exception list, are not included. Args: kwargs: All available kwargs. 
func: The function which should be called. excluded_keys: Keys to exclude from the result. Returns: Subset of kwargs which are accepted by `func`. """ excluded_keys = excluded_keys or [] possible_arguments = arguments_of(func) return { k: v for k, v in kwargs.items() if k in possible_arguments and k not in excluded_keys } def mark_as_experimental_feature(feature_name: Text) -> None: """Warns users that they are using an experimental feature.""" logger.warning( f"The {feature_name} is currently experimental and might change or be " "removed in the future 🔬 Please share your feedback on it in the " "forum (https://forum.rasa.com) to help us make this feature " "ready for production." ) def arguments_of(func: Callable) -> List[Text]: """Return the parameters of the function `func` as a list of names.""" import inspect return list(inspect.signature(func).parameters.keys())
rasa/shared/utils/common.py
6,859
Helper class to abstract the caching details. Returns all known (imported) subclasses of a class. Return the parameters of the function `func` as a list of names. Caches method calls based on the call's `args` and `kwargs`. Works for `async` and `sync` methods. Don't apply this to functions. Args: f: The decorated method whose return value should be cached. Returns: The return value which the method gives for the first call with the given arguments. Given the module name and path of a class, tries to retrieve the class. The loaded class can be used to instantiate new objects. Args: module_path: either an absolute path to a Python class, or the name of the class in the local / global scope. lookup_path: a path where to load the class from, if it cannot be found in the local / global scope. Returns: a Python class Raises: ImportError, in case the Python class cannot be found. RasaException, in case the imported result is something other than a class Allows to avoid recomputing a property over and over. The result gets stored in a local var. Computation of the property will happen once, on the first call of the property. All succeeding calls will use the value stored in the private property. Warns users that they are using an experimental feature. Returns only the kwargs which are required by a function. Keys, contained in the exception list, are not included. Args: kwargs: All available kwargs. func: The function which should be called. excluded_keys: Keys to exclude from the result. Returns: Subset of kwargs which are accepted by `func`. Return the module path of an instance's class. Sorts a list of dictionaries by their first key. Transforms e.g. a list like ['A', 'B', 'C'] into a sentence 'A, B and C'. try to import the class from the lookup path noinspection PyUnresolvedReferences Store the task immediately so that other concurrent calls of the method can re-use the same task and don't schedule a second execution.
2,043
en
0.856942
# Generated by Django 3.1 on 2020-08-08 05:58 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Battle', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('time_created', models.DateTimeField(auto_now_add=True)), ('archived', models.BooleanField(default=False)), ('blue_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='battles_as_blue', to=settings.AUTH_USER_MODEL)), ('red_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='battles_as_red', to=settings.AUTH_USER_MODEL)), ], ), ]
prickly-pufferfish/arena/battle/migrations/0001_initial.py
1,019
Generated by Django 3.1 on 2020-08-08 05:58
43
en
0.687921
from django.contrib import admin from leaflet.admin import LeafletGeoAdmin from .models import ProblemLabel, ProblemStatus # Register your models here. admin.site.register(ProblemLabel, LeafletGeoAdmin) admin.site.register(ProblemStatus)
app/problem_register/admin.py
242
Register your models here.
26
en
0.957485
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Transform a roidb into a trainable roidb by adding a bunch of metadata.""" import numpy as np from lib.fast_rcnn.config import cfg import lib.utils.cython_bbox def prepare_roidb(imdb): """Enrich the imdb's roidb by adding some derived quantities that are useful for training. This function precomputes the maximum overlap, taken over ground-truth boxes, between each ROI and each ground-truth box. The class with maximum overlap is also recorded. """ roidb = imdb.roidb for i in xrange(len(imdb.image_index)): roidb[i]['image'] = imdb.image_path_at(i) # need gt_overlaps as a dense array for argmax gt_overlaps = roidb[i]['gt_overlaps'].toarray() # max overlap with gt over classes (columns) max_overlaps = gt_overlaps.max(axis=1) # gt class that had the max overlap max_classes = gt_overlaps.argmax(axis=1) roidb[i]['max_classes'] = max_classes roidb[i]['max_overlaps'] = max_overlaps # sanity checks # max overlap of 0 => class should be zero (background) zero_inds = np.where(max_overlaps == 0)[0] assert all(max_classes[zero_inds] == 0) # max overlap > 0 => class should not be zero (must be a fg class) nonzero_inds = np.where(max_overlaps > 0)[0] assert all(max_classes[nonzero_inds] != 0) def add_bbox_regression_targets(roidb): """Add information needed to train bounding-box regressors.""" assert len(roidb) > 0 assert 'max_classes' in roidb[0], 'Did you call prepare_roidb first?' 
num_images = len(roidb) # Infer number of classes from the number of columns in gt_overlaps num_classes = roidb[0]['gt_overlaps'].shape[1] for im_i in xrange(num_images): rois = roidb[im_i]['boxes'] max_overlaps = roidb[im_i]['max_overlaps'] max_classes = roidb[im_i]['max_classes'] roidb[im_i]['bbox_targets'] = \ _compute_targets(rois, max_overlaps, max_classes) # Compute values needed for means and stds # var(x) = E(x^2) - E(x)^2 class_counts = np.zeros((num_classes, 1)) + cfg.EPS sums = np.zeros((num_classes, 4)) squared_sums = np.zeros((num_classes, 4)) for im_i in xrange(num_images): targets = roidb[im_i]['bbox_targets'] for cls in xrange(1, num_classes): cls_inds = np.where(targets[:, 0] == cls)[0] if cls_inds.size > 0: class_counts[cls] += cls_inds.size sums[cls, :] += targets[cls_inds, 1:].sum(axis=0) squared_sums[cls, :] += (targets[cls_inds, 1:] ** 2).sum(axis=0) means = sums / class_counts stds = np.sqrt(squared_sums / class_counts - means ** 2) # Normalize targets for im_i in xrange(num_images): targets = roidb[im_i]['bbox_targets'] for cls in xrange(1, num_classes): cls_inds = np.where(targets[:, 0] == cls)[0] roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :] roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :] # These values will be needed for making predictions # (the predicts will need to be unnormalized and uncentered) return means.ravel(), stds.ravel() def _compute_targets(rois, overlaps, labels): """Compute bounding-box regression targets for an image.""" # Ensure ROIs are floats rois = rois.astype(np.float, copy=False) # Indices of ground-truth ROIs gt_inds = np.where(overlaps == 1)[0] # Indices of examples for which we try to make predictions ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0] # Get IoU overlap between each ex ROI and gt ROI ex_gt_overlaps = utils.cython_bbox.bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :]) # Find which gt ROI each ex ROI has max overlap with: # this will be the ex ROI's gt target 
gt_assignment = ex_gt_overlaps.argmax(axis=1) gt_rois = rois[gt_inds[gt_assignment], :] ex_rois = rois[ex_inds, :] ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + cfg.EPS ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + cfg.EPS ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + cfg.EPS gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + cfg.EPS gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights targets_dw = np.log(gt_widths / ex_widths) targets_dh = np.log(gt_heights / ex_heights) targets = np.zeros((rois.shape[0], 5), dtype=np.float32) targets[ex_inds, 0] = labels[ex_inds] targets[ex_inds, 1] = targets_dx targets[ex_inds, 2] = targets_dy targets[ex_inds, 3] = targets_dw targets[ex_inds, 4] = targets_dh return targets
lib/roi_data_layer/roidb.py
5,184
Compute bounding-box regression targets for an image. Add information needed to train bounding-box regressors. Enrich the imdb's roidb by adding some derived quantities that are useful for training. This function precomputes the maximum overlap, taken over ground-truth boxes, between each ROI and each ground-truth box. The class with maximum overlap is also recorded. Transform a roidb into a trainable roidb by adding a bunch of metadata. -------------------------------------------------------- Fast R-CNN Copyright (c) 2015 Microsoft Licensed under The MIT License [see LICENSE for details] Written by Ross Girshick -------------------------------------------------------- need gt_overlaps as a dense array for argmax max overlap with gt over classes (columns) gt class that had the max overlap sanity checks max overlap of 0 => class should be zero (background) max overlap > 0 => class should not be zero (must be a fg class) Infer number of classes from the number of columns in gt_overlaps Compute values needed for means and stds var(x) = E(x^2) - E(x)^2 Normalize targets These values will be needed for making predictions (the predicts will need to be unnormalized and uncentered) Ensure ROIs are floats Indices of ground-truth ROIs Indices of examples for which we try to make predictions Get IoU overlap between each ex ROI and gt ROI Find which gt ROI each ex ROI has max overlap with: this will be the ex ROI's gt target
1,438
en
0.91677
"""This module contains the general information for AdaptorFruCapRef ManagedObject.""" import sys, os from ...ucsmo import ManagedObject from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta from ...ucsmeta import VersionMeta class AdaptorFruCapRefConsts(): IS_SUPPORTED_NO = "no" IS_SUPPORTED_YES = "yes" class AdaptorFruCapRef(ManagedObject): """This is AdaptorFruCapRef class.""" consts = AdaptorFruCapRefConsts() naming_props = set([u'vendor', u'model', u'revision']) mo_meta = MoMeta("AdaptorFruCapRef", "adaptorFruCapRef", "manufacturer-[vendor]-model-[model]-revision-[revision]", VersionMeta.Version141i, "InputOutput", 0xff, [], [""], [u'equipmentBladeCapProvider', u'equipmentRackUnitCapProvider'], [], ["Get"]) prop_meta = { "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), "is_supported": MoPropertyMeta("is_supported", "isSupported", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []), "model": MoPropertyMeta("model", "model", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []), "revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x10, 1, 510, None, [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []), "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x40, None, None, 
r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), "vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x80, 1, 510, None, [], []), } prop_map = { "childAction": "child_action", "dn": "dn", "isSupported": "is_supported", "model": "model", "revision": "revision", "rn": "rn", "sacl": "sacl", "status": "status", "vendor": "vendor", } def __init__(self, parent_mo_or_dn, vendor, model, revision, **kwargs): self._dirty_mask = 0 self.vendor = vendor self.model = model self.revision = revision self.child_action = None self.is_supported = None self.sacl = None self.status = None ManagedObject.__init__(self, "AdaptorFruCapRef", parent_mo_or_dn, **kwargs)
ucsmsdk/mometa/adaptor/AdaptorFruCapRef.py
3,040
This is AdaptorFruCapRef class. This module contains the general information for AdaptorFruCapRef ManagedObject.
112
en
0.635195
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd

# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
                   test.loc[:, 'MSSubClass':'SaleCondition']))

# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(
    lambda x: (x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]

# Convert data formats to NDArrays to feed into gluon.
# FIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# `.values` is the backward-compatible replacement.
X_train = all_X[:num_train].values
X_test = all_X[num_train:].values
y_train = train.SalePrice.values
X_train = nd.array(X_train)
y_train = nd.array(y_train)
# BUGFIX: NDArray.reshape returns a new array; the original code discarded
# the result, leaving y_train 1-D. Assign it so labels are (num_train, 1).
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)

square_loss = gluon.loss.L2Loss()


def get_rmse_log(net, X_train, y_train):
    """Gets root mse between the logarithms of the prediction and the truth."""
    num_train = X_train.shape[0]
    # Clip predictions to [1, inf) so the logarithm is always well defined.
    clipped_preds = nd.clip(net(X_train), 1, float('inf'))
    # L2Loss is 0.5 * (a - b)^2, hence the factor of 2 before the mean.
    return np.sqrt(2 * nd.sum(square_loss(
        nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)


def get_net():
    """Gets a neural network. Better results are obtained with modifications."""
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(50, activation="relu"))
        net.add(gluon.nn.Dense(1))
    net.initialize()
    return net


# NOTE: this function shadows the `train` DataFrame above. The DataFrame is
# not used after y_train has been extracted, so the shadowing is harmless,
# but the name is kept for compatibility with the original example.
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
          weight_decay, batch_size):
    """Trains the model and returns the train loss of the last epoch."""
    dataset_train = gluon.data.ArrayDataset(X_train, y_train)
    data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
                                            shuffle=True)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': learning_rate,
                             'wd': weight_decay})
    # Re-initialize so repeated calls (e.g. from k-fold CV) start fresh.
    net.collect_params().initialize(force_reinit=True)
    for epoch in range(epochs):
        for data, label in data_iter_train:
            with autograd.record():
                output = net(data)
                loss = square_loss(output, label)
            loss.backward()
            trainer.step(batch_size)
        avg_loss = get_rmse_log(net, X_train, y_train)
        # Only report the tail end of training to keep the output short.
        if epoch > verbose_epoch:
            print("Epoch %d, train loss: %f" % (epoch, avg_loss))
    return avg_loss


def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size):
    """Conducts k-fold cross validation for the model.

    Returns the average train and held-out-fold RMSE-log losses over k folds.
    """
    assert k > 1
    fold_size = X_train.shape[0] // k
    train_loss_sum = 0.0
    test_loss_sum = 0.0
    for test_idx in range(k):
        # Fold `test_idx` is held out for validation; the rest are
        # concatenated into the training split.
        X_val_test = X_train[test_idx * fold_size:
                             (test_idx + 1) * fold_size, :]
        y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
        val_train_defined = False
        for i in range(k):
            if i != test_idx:
                X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
                y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
                if not val_train_defined:
                    X_val_train = X_cur_fold
                    y_val_train = y_cur_fold
                    val_train_defined = True
                else:
                    X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
                    y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
        net = get_net()
        train_loss = train(net, X_val_train, y_val_train, epochs,
                           verbose_epoch, learning_rate, weight_decay,
                           batch_size)
        train_loss_sum += train_loss
        test_loss = get_rmse_log(net, X_val_test, y_val_test)
        print("Test loss: %f" % test_loss)
        test_loss_sum += test_loss
    return train_loss_sum / k, test_loss_sum / k


# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100

train_loss, test_loss = \
    k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
                       learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
      (k, train_loss, test_loss))


def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
          weight_decay, batch_size):
    """Trains the model and predicts on the test data set.

    Writes the Kaggle submission file `submission.csv`. Note that the
    predictions use the module-level NDArray `X_test`; the `test` DataFrame
    only supplies the `Id` column for the submission.
    """
    net = get_net()
    _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
              weight_decay, batch_size)
    preds = net(X_test).asnumpy()
    test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)


learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
      weight_decay, batch_size)
example/gluon/kaggle_k_fold_cross_validation.py
6,871
Gets a neural network. Better results are obtained with modifications. Gets root mse between the logarithms of the prediction and the truth. Conducts k-fold cross validation for the model. Trains the model and predicts on the test data set. Trains the model. Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This example provides an end-to-end pipeline for a common Kaggle competition. The entire pipeline includes common utilities such as k-fold cross validation and data pre-processing. Specifically, the example studies the `House Prices: Advanced Regression Techniques` challenge as a case study. The link to the problem on Kaggle: https://www.kaggle.com/c/house-prices-advanced-regression-techniques After logging in www.kaggle.com, the training and testing data sets can be downloaded at: https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv Get all the numerical features and apply standardization. Convert categorical feature values to numerical (including N/A). Approximate N/A feature value by the mean value of the current feature. Convert data formats to NDArrays to feed into gluon. The sets of parameters. Better results are obtained with modifications. 
These parameters can be fine-tuned with k-fold cross-validation.
2,060
en
0.857024
#!/usr/bin/env python3


class Solution:
    def removeDuplicates(self, nums):
        """Return how many elements of sorted `nums` survive when each
        distinct value is limited to at most two occurrences.

        Counting variant of LeetCode 80: the list itself is not modified.
        """
        kept = 0
        run_start = 0  # index where the current run of equal values began
        for idx, value in enumerate(nums):
            if nums[run_start] != value:
                # A new run starts here; its first element is always kept.
                run_start = idx
                kept += 1
            elif idx - run_start < 2:
                # Still within the first two copies of the current run.
                kept += 1
        return kept


sol = Solution()
nums = [0,0,1,1,1,1,2,3,3]
nums = [1,1,1,2,2,3]
nums = []
nums = [1]
nums = [1,1]
print(sol.removeDuplicates(nums))
interview/leet/80_Remove_Duplicates_from_Sorted_Array_II_v2.py
434
!/usr/bin/env python3
21
fr
0.448822
#!/usr/bin/env python3 from string import ascii_uppercase from re import fullmatch from time import sleep from random import Random # Default game presets. testing_preset = {'height': 10, 'width': 10, '5_ships': 0, '4_ships': 0, '3_ships': 0, '2_ships': 2, '1_ships': 0, 'allow_mines': True, 'allow_moves': True, 'mine_turns': 5, 'p_type': 'Player', 'player_timer': 0} normal_mode_preset = {'height': 10, 'width': 10, '5_ships': 1, '4_ships': 1, '3_ships': 2, '2_ships': 1, '1_ships': 0, 'allow_mines': False, 'allow_moves': False, 'mine_turns': None, 'p_type': 'CPU', 'player_timer': 5} advanced_mode_preset = {'height': 15, 'width': 15, '5_ships': 2, '4_ships': 2, '3_ships': 2, '2_ships': 1, '1_ships': 0, 'allow_mines': True, 'allow_moves': True, 'mine_turns': 5, 'p_type': 'CPU', 'player_timer': 5} # Miscellaneous global values. letters = ascii_uppercase # Global user-variables. PAD_AMOUNT = 50 class Utils(object): """ Utility class used for getting input and other common functions. Contains many functions to save space by condensing input and custom string formatting methods into one place. All methods are static, and do not modify parameters in-place. """ @staticmethod def box_string(string, min_width=-1, print_string=False): """ Place a string into an ASCII box. The result is placed inside of a ASCII box consisting of '+' characters for the corners and '-' characters for the edges. Parameters ---------- string : str String to be boxed. min_width : int, optional Specifies that the box be of a certain minimum width. Defaults to input string width. print_string : bool, optional If True, prints the string after building it. Defaults to False. Returns ------- str Input string with a box around it. """ # Parameters. split_string = string.split('\n') height = len(split_string) length = max(min_width, *[len(x) for x in split_string]) # String builder. 
result = '+' + '-' * (length + 2) + '+\n' for i in range(height): result += '| %s |\n' % split_string[i].center(length) result += '+' + '-' * (length + 2) + '+' # Print and return result. if print_string: print(result) return result @staticmethod def num_input(question, *choices): """ Take user input based on several different options. The input question will be repeated until valid input is given. The choices will be displayed in order with a number next to them indicating their id. Responses can be given as the choice id or the full choice name. Parameters ---------- question : str String to be displayed as the input question. Will be boxed with Utils#box_string before printing. *choices : *str Options for the user to choose from. Returns ------- int Number of the answer choice, corresponding to the index of the choice in *choices. """ error = '' while True: # Print question and ask for input. Utils.box_string((error + '\n' + question).strip(), print_string=True) for i in range(len(choices)): print('%d: %s' % (i, choices[i])) response = input('Response: ') # Test whether input is an integer or string. if fullmatch(r'\d+', response.strip()): to_int = int(response.strip()) # Determine if input integer corresponds to one of the answer choices. if to_int < len(choices): return to_int else: error = 'ERROR: Invalid input! Input integer is not one of the available choices! Please try again.' continue else: # Determine if input string is one of the answer choices. for i in range(len(choices)): if response.strip().lower() == choices[i].strip().lower(): return i error = 'ERROR: Invalid input! Input string is not one of the available choices! Please try again.' continue @staticmethod def string_input(question, condition=r'.+'): """ Take string-based user input. The input question will be repeated until valid input is given, determined by the condition regex. Parameters ---------- question : str String to be displayed as the input question. 
Will be boxed with Utils#box_string before printing. condition : r-string, optional Regex to test input string off of. Returns ------- str Input string. """ error = '' while True: # Print question and ask for input. Utils.box_string((error + '\n' + question).strip(), print_string=True) response = input() # Test if input is valid. if fullmatch(condition, response): return response else: error = 'ERROR: Invalid input! Please try again.' continue @staticmethod def print_settings(settings): """ Pretty-print a settings dictionary. Parameters ---------- settings : dict The settings dictionary to pretty-print. Returns ------- None """ Utils.box_string('Current Settings', print_string=True) print('Grid Size:') print('\tWidth: %d' % settings['width']) print('\tHeight: %d' % settings['height']) print('Ship Amount:') print('\t5-Long Ships: %d' % settings['5_ships']) print('\t4-Long Ships: %d' % settings['4_ships']) print('\t3-Long Ships: %d' % settings['3_ships']) print('\t2-Long Ships: %d' % settings['2_ships']) print('\t1-Long Ships: %d' % settings['1_ships']) print('Special Abilities:') print('\tShip Moving: %s' % str(settings['allow_moves'])) print('\tMines: %s' % str(settings['allow_mines'])) if settings['allow_mines']: print('\tTurns Between Mines: %d' % settings['mine_turns']) print('Game Type: Player vs. %s' % settings['p_type']) @staticmethod def grid_pos_input(height, width, question='Enter a Position:'): """ Take user-input in coordinate form. The input question will be repeated until valid input is given. The input must be a valid coordinate in battleship form (r'[A-Z]\d+'). The input coordinate must be inside of the grid defined by height and width. Parameters ---------- height : int Specifies the height of the grid. width : int Specifies the width of the grid. question : str, optional String to be displayed as the input question. Will be boxed with Utils#box_string before printing. Defaults to 'Enter a Position'. 
Returns ------- tuple Contains the following: int Height-aligned position (y-position) of input. int Width-aligned position (x-position) of input. """ error = '' while True: # Print the question and ask for input. Utils.box_string((error + '\n' + question).strip(), print_string=True) loc = input().upper() # Test if input is a valid coordinate and is in the grid. if not fullmatch(r'[A-Z][1-2]?[0-9]', loc): error = 'ERROR: Invalid input! Input string is not a valid coordinate! Please try again.' continue elif loc[0] in letters[:height] and 0 < int(loc[1:]) <= width: return letters.index(loc[0]), int(loc[1:]) - 1 else: error = 'ERROR: Invalid input! Input string is not in the grid! Please try again.' continue class BattleshipGame(object): """ Class that handles game execution and running. Controls game setup based off of a certain settings preset. Handles all input and output for the game. Attributes ---------- settings : dict Settings that the game is running based off of. height : int Height of the grids used for the game. width : int Width of the grids used for the game. p1_grid : list Two dimensional list of ints containing player 1's board. p1_grid_2 : list Two dimensional list of ints containing player 1's guesses. p1_ships : list List of player 1's ship dicts with position, direction, and size data. p2_grid : list Two dimensional list of ints containing player 2's board. p2_grid_2 : list Two dimensional list of ints containing player 2's guesses. p2_ships : list List of player 2's ship dicts with position, direction, and size data. p2_cpu : bool True if player 2 is not a human player, False otherwise. turn : int Current turn number. p1_mines : int Current amount of mines available to Player 1. p2_mines : int Current amount of mines available to Player 2. p1_move : str Return message to display to Player 2 on their turn. p2_move : str Return message to display to Player 1 on their turn. 
""" def __init__(self, settings): """ Constructor for the BattleshipGame class. Parameters ---------- settings : dict Settings to create the game based off of. """ # Grid attributes. self.settings = settings self.height = settings['height'] self.width = settings['width'] # Player 1 grids. self.p1_grid = [[0 for _ in range(self.width)] for _ in range(self.height)] self.p1_grid_2 = [[0 for _ in range(self.width)] for _ in range(self.height)] self.p1_ships = [] # Player 2 grids. self.p2_grid = [[0 for _ in range(self.width)] for _ in range(self.height)] self.p2_grid_2 = [[0 for _ in range(self.width)] for _ in range(self.height)] self.p2_ships = [] # Miscellaneous attributes. self.p2_cpu = settings['p_type'] == 'CPU' self.turn = 0 self.p1_mines = 0 self.p2_mines = 0 self.p1_move = '' self.p2_move = '' # CPU attributes. self.cpu_data = {'shots': [], 'misses': [], 'hits': [], 'p1_ships': None} def update_board(self, player): """ Update both grids for a player. Adds new ships and puts them into the right locations. Parameters ---------- player : int Determines which player's grids to print. Zero-indexed. """ # Place ships into grid, if not already. 
if player == 0: # Player 1 board = self.p1_grid for ship in self.p1_ships: if not ship['setup']: if ship['direction'] == 0: for i in range(ship['size']): if not (1 <= board[ship['y_pos']][ship['x_pos'] + i] <= 26 or board[ship['y_pos']][ship['x_pos'] + i] == 26): board[ship['y_pos']][ship['x_pos'] + i] = ship['num'] + 1 else: for j in range(ship['size']): if not (1 <= board[ship['y_pos'] + j][ship['x_pos']] <= 26 or board[ship['y_pos'] + j][ship['x_pos']] == 26): board[ship['y_pos'] + j][ship['x_pos']] = ship['num'] + 1 ship['setup'] = True else: # Player 2 board = self.p2_grid for ship in self.p2_ships: if not ship['setup']: if ship['direction'] == 0: for i in range(ship['size']): if not (1 <= board[ship['y_pos']][ship['x_pos'] + i] <= 26 or board[ship['y_pos']][ship['x_pos'] + i] == 26): board[ship['y_pos']][ship['x_pos'] + i] = ship['num'] + 1 else: for j in range(ship['size']): if not (1 <= board[ship['y_pos'] + j][ship['x_pos']] <= 26 or board[ship['y_pos'] + j][ship['x_pos']] == 26): board[ship['y_pos'] + j][ship['x_pos']] = ship['num'] + 1 ship['setup'] = True def print_board(self, player): """ Pretty-print the current boards of a player. Prints both boards for a player, along with coordinate references, titles, and boxes around the grids. Parameters ---------- player : int Determines which player's grids to print. Zero-indexed. Returns ------- str Same as the string that is printed. """ # Characters to use while printing. characters = '.' + letters + '*0#' # 0:Null, 1-26:Ships, 27:Hit, 28:Miss, 29:Mine # Update board. self.update_board(player) # Get boards to print. if player == 0: # Player 1 board = self.p1_grid board_2 = self.p1_grid_2 else: # Player 2 board = self.p2_grid board_2 = self.p2_grid_2 # Build header. 
result = ' +' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+\n' result += ' |' + 'Your Board'.center(self.width * 2 + 1) + '|' + 'Enemy Board'.center(self.width * 2 + 1) + '|\n' result += ' +' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+\n' # Build x-coordinate reference. if self.width > 9: result += ' | ' + ' '.join([str(x + 1).rjust(2)[0] for x in range(self.width)]) + ' | ' + ' '.join([str(x + 1).rjust(2)[0] for x in range(self.width)]) + ' |\n' result += ' | ' + ' '.join([str(x + 1).rjust(2)[1] for x in range(self.width)]) + ' | ' + ' '.join([str(x + 1).rjust(2)[1] for x in range(self.width)]) + ' |\n' result += '+---+' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+\n' # Build y-coordinate reference and grid. for i in range(self.height): result += '| ' + letters[i] + ' | ' + ' '.join([characters[x] for x in board[i]]) + ' | ' + ' '.join([characters[x] for x in board_2[i]]) + ' |\n' result += '+---+' + '-' * (self.width * 2 + 1) + '+' + '-' * (self.width * 2 + 1) + '+' # Print and return result. print(result) return result def setup_ship(self, pos, direction, player, count, size): """ Create a ship. Creates a ship dictionary based on positional, directional, player, and size data and tests if placement is legal. Parameters ---------- pos : tuple (y,x) coordinate pair of top-left corner of the ship. direction : int Determines the direction of the ship: 0: Horizontal. 1: Vertical. player : int Determines which player to assign the ship to. Zero-indexed. count : int Current ship count for internal tracking use. size : int Length of the ship. Returns ------- str Error string if an error occurred, None otherwise. """ try: # Test if the ship does not overlap another ship. if player == 0: # Player 1 board = self.p1_grid if direction == 0: for i in range(size): if board[pos[0]][pos[1] + i] != 0: return 'ERROR: You cannot place a ship on top of another!' 
else: for j in range(size): if board[pos[0] + j][pos[1]] != 0: return 'ERROR: You cannot place a ship on top of another!' else: # Player 2 board = self.p2_grid if direction == 0: for i in range(size): if board[pos[0]][pos[1] + i] != 0: return 'ERROR: You cannot place a ship on top of another!' else: for j in range(size): if board[pos[0] + j][pos[1]] != 0: return 'ERROR: You cannot place a ship on top of another!' except IndexError: # Catch if ship would be placed out-of-bounds. return 'ERROR: You must place a ship inside the grid boundaries!' # Create the ship's dictionary and append it to the player's ship list. if player == 0: self.p1_ships.append({'num': count, 'size': size, 'x_pos': pos[1], 'y_pos': pos[0], 'direction': direction, 'setup': False, 'health': size, 'hits': []}) else: self.p2_ships.append({'num': count, 'size': size, 'x_pos': pos[1], 'y_pos': pos[0], 'direction': direction, 'setup': False, 'health': size, 'hits': []}) return None def setup_ships(self, size, player, count): """ Setup all the ships of a particular size for a certain player. Sets up all of the length-n size ships for a player. Count is not updated in-place. Parameters ---------- size : int Length of the ships. player : int Determines which player to assign the ships to. Zero-indexed. count : int Current ship count for internal tracking use. Returns ------- int The updated cumulative ship count. """ # Setup number of ships based on value defined in game settings. for i in range(self.settings['%d_ships' % size]): error = '' while True: # Print current board for player reference. self.print_board(player) # Take ship details from player. pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to place ship \'%s\' (%d-long)?' % (letters[count], size)).strip()) direction = Utils.num_input('Which direction?', 'Horizontal', 'Vertical') # Determine if the ship needs to be inputted again. 
error = self.setup_ship(pos, direction, player, count, size) if error is None: break count += 1 # Return updated cumulative ship total. return count def p1_turn(self): """ Execute a turn for Player 1. Handles input and output for the turn and updates both player's grids. Returns ------- bool True if game ends after the move, False otherwise """ print('\n' * PAD_AMOUNT) # Pad previous output. Utils.box_string('Player 1\'s Turn', min_width=self.width * 4 + 5, print_string=True) self.p1_move = '' # Test if Player 2 is a human. if not self.p2_cpu: # Alert Player 2 to look away. Utils.box_string('Player 2, please look away.', min_width=self.width * 4 + 5, print_string=True) sleep(self.settings['player_timer']) self.print_board(0) # Notify player if a ship moved. if self.p2_move != '': Utils.box_string('Note: ' + self.p2_move, min_width=self.width * 4 + 5, print_string=True) # Determine input method based on possible actions. if self.settings['allow_moves']: if self.settings['allow_mines'] and self.p1_mines > 0: action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits', 'Place a Mine') else: action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits') if action == 0: # Fire Missile error = '' while True: y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error+'\nWhere do you want to fire?').strip()) if True in [(y_pos, x_pos) in self.p2_ships[x]['hits'] for x in range(len(self.p2_ships))] or self.p1_grid_2[y_pos][x_pos] > 26: error = 'ERROR: You already guessed there!' continue if self.p2_grid[y_pos][x_pos] > 26: error = 'ERROR: You already guessed there!' continue if self.p2_grid[y_pos][x_pos] != 0: Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True) # Update ship. 
self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] -= 1 self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos)) # Test if ship still stands. if self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] == 0: Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p1_grid_2[y_pos][x_pos] = 27 self.p2_grid[y_pos][x_pos] = 27 else: Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p1_grid_2[y_pos][x_pos] = 28 self.p2_grid[y_pos][x_pos] = 28 break elif action == 1: # Move Ship error = '' ship_num = -1 while True: ship_num = letters.index(Utils.string_input((error + '\nWhich ship do you want to move?').strip(), condition=('[A-%sa-%s]' % (letters[len(self.p1_ships) - 1], letters[len(self.p1_ships) - 1].lower()))).upper()) ship = self.p1_ships[ship_num] if ship['health'] == 0: error = 'ERROR: That ship is sunk!' continue move_direction = Utils.num_input('Which direction do you want to move it?', 'Up', 'Down', 'Left', 'Right') error = '' try: if move_direction < 2: # Up or down. true_dir = -1 if move_direction == 0 else 1 board = self.p1_grid if ship['direction'] == 0: for i in range(ship['size']): # Check if another ship is there. for ship2 in self.p1_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + true_dir and ship2['x_pos'] + k == ship['x_pos'] + i: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + true_dir and ship2['x_pos'] == ship['x_pos'] + i: error = 'ERROR: You cannot move your ship there!' 
continue if (1 <= board[ship['y_pos'] + true_dir][ship['x_pos'] + i] <= 26 or board[ship['y_pos'] + true_dir][ship['x_pos'] + i] == 29) and (board[ship['y_pos'] + true_dir][ship['x_pos'] + i] != ship_num + 1) or ship['y_pos'] + true_dir < 0 or ship['y_pos'] >= self.height: error = 'ERROR: You cannot move your ship there!' else: for j in range(ship['size']): # Check if another ship is there. for ship2 in self.p1_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j + true_dir and ship2['x_pos'] + k == ship['x_pos']: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j + true_dir and ship2['x_pos'] == ship['x_pos']: error = 'ERROR: You cannot move your ship there!' continue if (1 <= board[ship['y_pos'] + j + true_dir][ship['x_pos']] <= 26 or board[ship['y_pos'] + j + true_dir][ship['x_pos']] == 29) and (board[ship['y_pos'] + j + true_dir][ship['x_pos']] != ship_num + 1) or ship['y_pos'] + j + true_dir < 0 or ship['y_pos'] >= self.height: error = 'ERROR: You cannot move your ship there!' if error == '': self.p1_ships[ship_num]['setup'] = False self.p1_ships[ship_num]['y_pos'] += true_dir self.p1_move = 'Player 1 just moved a ship ' + ('up!' if move_direction == 0 else 'down!') # Update board positions if ship['direction'] == 0: for i in range(ship['size'] - 1): board[ship['y_pos'] + true_dir][ship['x_pos'] + i] = 0 else: for j in range(ship['size'] - 1): board[ship['y_pos'] + j + true_dir][ship['x_pos']] = 0 break else: # Left or right. true_dir = -1 if move_direction == 2 else 1 board = self.p1_grid if ship['direction'] == 0: for i in range(ship['size']): # Check if another ship is there. 
for ship2 in self.p1_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] and ship2['x_pos'] + k == ship['x_pos'] + i + true_dir: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] and ship2['x_pos'] == ship['x_pos'] + i + true_dir: error = 'ERROR: You cannot move your ship there!' continue if (1 <= board[ship['y_pos']][ship['x_pos'] + i + true_dir] <= 26 or board[ship['y_pos']][ship['x_pos'] + i + true_dir] == 29) and (board[ship['y_pos']][ship['x_pos'] + i + true_dir] != ship_num + 1) or ship['x_pos'] + i + true_dir < 0 or ship['x_pos'] >= self.width: error = 'ERROR: You cannot move your ship there!' else: for j in range(ship['size']): # Check if another ship is there. for ship2 in self.p1_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j and ship2['x_pos'] + k == ship['x_pos'] + true_dir: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j and ship2['x_pos'] == ship['x_pos'] + true_dir: error = 'ERROR: You cannot move your ship there!' continue if (1 <= board[ship['y_pos'] + j][ship['x_pos'] + true_dir] <= 26 or board[ship['y_pos'] + j][ship['x_pos'] + true_dir] == 29) and (board[ship['y_pos'] + j][ship['x_pos'] + true_dir] != ship_num + 1) or ship['x_pos'] + true_dir < 0 or ship['x_pos'] >= self.width: error = 'ERROR: You cannot move your ship there!' if error == '': self.p1_ships[ship_num]['setup'] = False self.p1_ships[ship_num]['x_pos'] += true_dir self.p1_move = 'Player 1 just moved a ship to the ' + ('left!' if move_direction == 2 else 'right!') # Update board positions. 
if ship['direction'] == 0: for i in range(ship['size'] - 1): board[ship['y_pos']][ship['x_pos'] + i + true_dir] = 0 else: for j in range(ship['size'] - 1): board[ship['y_pos'] + j][ship['x_pos'] + true_dir] = 0 break except IndexError: error = 'ERROR: You cannot move your ship there!' # Update board positions again, just in case. for i in range(self.height): for j in range(self.width): if board[i][j] == ship_num + 1: board[i][j] = 0 self.p1_ships[ship_num]['hits'] = [] self.update_board(0) elif action == 2: # Clear Misses for i in range(self.height): for j in range(self.width): if self.p1_grid_2[i][j] == 28: self.p1_grid_2[i][j] = 0 return self.p1_turn() elif action == 3: # Clear Hits for i in range(self.height): for j in range(self.width): if self.p1_grid_2[i][j] == 27: self.p1_grid_2[i][j] = 0 return self.p1_turn() else: # Place Mine error = '' while True: y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to place the mine?').strip()) if self.p2_grid[y_pos][x_pos] == 29: error = 'ERROR: You already placed a mine there!' continue if 1 <= self.p2_grid[y_pos][x_pos] <= 26: ship_num = self.p2_grid[y_pos][x_pos] - 1 self.p2_ships[ship_num]['health'] = 0 for i in range(self.height): for j in range(self.width): if self.p2_grid[i][j] == ship_num + 1: self.p2_grid[i][j] = 27 Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True) self.p2_grid[y_pos][x_pos] = 29 self.p1_grid_2[y_pos][x_pos] = 29 self.p1_mines -= 1 break else: error = '' while True: y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to fire?').strip()) if self.p1_grid_2[y_pos][x_pos] != 0: error = 'ERROR: You already guessed there!' continue if self.p2_grid[y_pos][x_pos] > 26: error = 'ERROR: You already guessed there!' continue if self.p2_grid[y_pos][x_pos] != 0: Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True) # Update ship. 
self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] -= 1 self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos)) # Test if ship still stands. if self.p2_ships[self.p2_grid[y_pos][x_pos] - 1]['health'] == 0: Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p1_grid_2[y_pos][x_pos] = 27 self.p2_grid[y_pos][x_pos] = 27 else: Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p1_grid_2[y_pos][x_pos] = 28 self.p2_grid[y_pos][x_pos] = 28 break # End turn. Utils.box_string('Your turn is now over.', print_string=True) sleep(self.settings['player_timer']) # Detect if game is over. return sum([x['health'] for x in self.p2_ships]) == 0 def p2_turn(self): """ Execute a turn for Player 2. Handles input and output for the turn and updates both player's grids. Returns ------- bool True if game ends after the move, False otherwise """ print('\n' * PAD_AMOUNT) # Pad previous output. Utils.box_string('Player 2\'s Turn', min_width=self.width * 4 + 5, print_string=True) self.p2_move = '' # Test if Player 2 is a human. if not self.p2_cpu: # Player is a human # Alert Player 1 to look away. Utils.box_string('Player 1, please look away.', min_width=self.width * 4 + 5, print_string=True) sleep(self.settings['player_timer']) self.print_board(1) if self.p1_move != '': Utils.box_string('Note: ' + self.p1_move, min_width=self.width * 4 + 5, print_string=True) # Determine input method based on possible actions. 
if self.settings['allow_moves']: if self.settings['allow_mines'] and self.p2_mines > 0: action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits', 'Place a Mine') else: action = Utils.num_input('What do you want to do?', 'Fire Missile', 'Move a Ship', 'Clear Misses', 'Clear Hits') if action == 0: # Fire Missile error = '' while True: y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error+'\nWhere do you want to fire?').strip()) if True in [(y_pos, x_pos) in self.p1_ships[x]['hits'] for x in range(len(self.p1_ships))] or self.p2_grid_2[y_pos][x_pos] > 26: error = 'ERROR: You already guessed there!' continue if self.p1_grid[y_pos][x_pos] > 26: error = 'ERROR: You already guessed there!' continue if self.p1_grid[y_pos][x_pos] != 0: Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True) # Update ship. self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] -= 1 self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos)) # Test if ship still stands. if self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] == 0: Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p2_grid_2[y_pos][x_pos] = 27 self.p1_grid[y_pos][x_pos] = 27 else: Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p2_grid_2[y_pos][x_pos] = 28 self.p1_grid[y_pos][x_pos] = 28 break elif action == 1: # Move Ship error = '' ship_num = -1 while True: ship_num = letters.index(Utils.string_input((error + '\nWhich ship do you want to move?').strip(), condition=('[A-%sa-%s]' % (letters[len(self.p1_ships) - 1], letters[len(self.p1_ships) - 1].lower()))).upper()) ship = self.p2_ships[ship_num] if ship['health'] == 0: error = 'ERROR: That ship is sunk!' 
continue move_direction = Utils.num_input('Which direction do you want to move it?', 'Up', 'Down', 'Left', 'Right') error = '' try: if move_direction < 2: # Up or down. true_dir = -1 if move_direction == 0 else 1 board = self.p2_grid if ship['direction'] == 0: for i in range(ship['size']): # Check if another ship is there. for ship2 in self.p2_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + true_dir and ship2['x_pos'] + k == ship['x_pos'] + i: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + true_dir and ship2['x_pos'] == ship['x_pos'] + i: error = 'ERROR: You cannot move your ship there!' continue if (1 <= board[ship['y_pos'] + true_dir][ship['x_pos'] + i] <= 26 or board[ship['y_pos'] + true_dir][ship['x_pos'] + i] == 29) and (board[ship['y_pos'] + true_dir][ship['x_pos'] + i] != ship_num + 1) or ship['y_pos'] + true_dir < 0 or ship['y_pos'] >= self.height: error = 'ERROR: You cannot move your ship there!' else: for j in range(ship['size']): # Check if another ship is there. for ship2 in self.p2_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j + true_dir and ship2['x_pos'] + k == ship['x_pos']: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j + true_dir and ship2['x_pos'] == ship['x_pos']: error = 'ERROR: You cannot move your ship there!' continue if (1 <= board[ship['y_pos'] + j + true_dir][ship['x_pos']] <= 26 or board[ship['y_pos'] + j + true_dir][ship['x_pos']] == 29) and (board[ship['y_pos'] + j + true_dir][ship['x_pos']] != ship_num + 1) or ship['y_pos'] + j + true_dir < 0 or ship['y_pos'] >= self.height: error = 'ERROR: You cannot move your ship there!' 
if error == '': self.p2_ships[ship_num]['setup'] = False self.p2_ships[ship_num]['y_pos'] += true_dir self.p2_move = 'Player 2 just moved a ship ' + ('up!' if move_direction == 0 else 'down!') # Update board positions if ship['direction'] == 0: for i in range(ship['size'] - 1): board[ship['y_pos'] + true_dir][ship['x_pos'] + i] = 0 else: for j in range(ship['size'] - 1): board[ship['y_pos'] + j + true_dir][ship['x_pos']] = 0 break else: # Left or right. true_dir = -1 if move_direction == 2 else 1 board = self.p2_grid if ship['direction'] == 0: for i in range(ship['size']): # Check if another ship is there. for ship2 in self.p2_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] and ship2['x_pos'] + k == ship['x_pos'] + i + true_dir: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] and ship2['x_pos'] == ship['x_pos'] + i + true_dir: error = 'ERROR: You cannot move your ship there!' continue if (1 <= board[ship['y_pos']][ship['x_pos'] + i + true_dir] <= 26 or board[ship['y_pos']][ship['x_pos'] + i + true_dir] == 29) and (board[ship['y_pos']][ship['x_pos'] + i + true_dir] != ship_num + 1) or ship['x_pos'] + i + true_dir < 0 or ship['x_pos'] >= self.width: error = 'ERROR: You cannot move your ship there!' else: for j in range(ship['size']): # Check if another ship is there. for ship2 in self.p2_ships: if ship2['direction'] == 0: for k in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] == ship['y_pos'] + j and ship2['x_pos'] + k == ship['x_pos'] + true_dir: error = 'ERROR: You cannot move your ship there!' continue else: for l in range(ship2['size']): if ship2['num'] != ship_num and ship2['y_pos'] + l == ship['y_pos'] + j and ship2['x_pos'] == ship['x_pos'] + true_dir: error = 'ERROR: You cannot move your ship there!' 
continue if (1 <= board[ship['y_pos'] + j][ship['x_pos'] + true_dir] <= 26 or board[ship['y_pos'] + j][ship['x_pos'] + true_dir] == 29) and (board[ship['y_pos'] + j][ship['x_pos'] + true_dir] != ship_num + 1) or ship['x_pos'] + true_dir < 0 or ship['x_pos'] >= self.width: error = 'ERROR: You cannot move your ship there!' if error == '': self.p2_ships[ship_num]['setup'] = False self.p2_ships[ship_num]['x_pos'] += true_dir self.p2_move = 'Player 2 just moved a ship to the ' + ('left!' if move_direction == 2 else 'right!') # Update board positions if ship['direction'] == 0: for i in range(ship['size'] - 1): board[ship['y_pos']][ship['x_pos'] + i + true_dir] = 0 else: for j in range(ship['size'] - 1): board[ship['y_pos'] + j][ship['x_pos'] + true_dir] = 0 break except IndexError: error = 'ERROR: You cannot move your ship there! (INDEX ERROR)' # Update board positions again, just in case. for i in range(self.height): for j in range(self.width): if board[i][j] == ship_num + 1: board[i][j] = 0 self.p2_ships[ship_num]['hits'] = [] self.update_board(1) elif action == 2: # Clear Misses for i in range(self.height): for j in range(self.width): if self.p2_grid_2[i][j] == 28: self.p2_grid_2[i][j] = 0 return self.p2_turn() elif action == 3: # Clear Hits for i in range(self.height): for j in range(self.width): if self.p2_grid_2[i][j] == 27: self.p2_grid_2[i][j] = 0 return self.p2_turn() else: # Place Mine error = '' while True: y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to place the mine?').strip()) if self.p1_grid[y_pos][x_pos] == 29: error = 'ERROR: You already placed a mine there!' 
continue if 1 <= self.p1_grid[y_pos][x_pos] <= 26: ship_num = self.p1_grid[y_pos][x_pos] - 1 self.p1_ships[ship_num]['health'] = 0 for i in range(self.height): for j in range(self.width): if self.p1_grid[i][j] == ship_num + 1: self.p1_grid[i][j] = 27 Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True) self.p1_grid[y_pos][x_pos] = 29 self.p2_grid_2[y_pos][x_pos] = 29 self.p2_mines -= 1 break else: error = '' while True: y_pos, x_pos = Utils.grid_pos_input(self.height, self.width, question=(error + '\nWhere do you want to fire?').strip()) if self.p2_grid_2[y_pos][x_pos] != 0: error = 'ERROR: You already guessed there!' continue if self.p1_grid[y_pos][x_pos] > 26: error = 'ERROR: You already guessed there!' continue if self.p1_grid[y_pos][x_pos] != 0: Utils.box_string('Direct Hit!', min_width=self.width * 4 + 5, print_string=True) # Update ship. self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] -= 1 self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos)) # Test if ship still stands. if self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] == 0: Utils.box_string('You sunk a ship!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p2_grid_2[y_pos][x_pos] = 27 self.p1_grid[y_pos][x_pos] = 27 else: Utils.box_string('Miss!', min_width=self.width * 4 + 5, print_string=True) # Update grid. self.p2_grid_2[y_pos][x_pos] = 28 self.p1_grid[y_pos][x_pos] = 28 break else: # Player is CPU # Alert Player 1 of CPU turn. Utils.box_string('CPU is deciding...', min_width=self.width * 4 + 5, print_string=True) sleep(2) rng = Random() while True: pos = (rng.randrange(self.height), rng.randrange(self.width)) y_pos, x_pos = pos if self.p1_grid[y_pos][x_pos] != 0: # Update ship. self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] -= 1 self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['hits'].append((y_pos, x_pos)) # Test if ship still stands. 
if self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['health'] == 0: self.cpu_data['p1_ships']['%d_ships' % self.p1_ships[self.p1_grid[y_pos][x_pos] - 1]['size']] -= 1 # Update grid. self.p2_grid_2[y_pos][x_pos] = 27 self.p1_grid[y_pos][x_pos] = 27 else: # Update grid. self.p2_grid_2[y_pos][x_pos] = 28 self.p1_grid[y_pos][x_pos] = 28 break # End turn. Utils.box_string('Your turn is now over.', print_string=True) sleep(self.settings['player_timer']) # Detect if game is over. return sum([x['health'] for x in self.p1_ships]) == 0 def start_game(self): """ Start a new game. Starts a game with the settings provided in the constructor. All game code is contained here, with relevant helper methods also called here. Every game has two stages: Setup and Play. Returns ------- int Winning player's number. Zero-indexed. """ # Setup Phase: # In this stage, both players choose where to place their ships. print('\n' * PAD_AMOUNT) # Pad previous output. Utils.box_string('Setup Phase', min_width=self.width * 4 + 5, print_string=True) Utils.box_string('Player 1\'s Turn', min_width=self.width * 4 + 5, print_string=True) # Test if Player 2 is a human. if not self.p2_cpu: # Alert Player 2 to look away. Utils.box_string('Player 2, please look away.', min_width=self.width * 4 + 5, print_string=True) sleep(self.settings['player_timer']) # Player 1 Utils.box_string('Player 1 Setup', min_width=self.width * 4 + 5, print_string=True) p1_ship_count = 0 for i in range(5): p1_ship_count = self.setup_ships(i + 1, 0, p1_ship_count) # Test if Player 2 is a human. if self.p2_cpu: # Player 2 is CPU # Setup CPU data. self.cpu_data['p1_ships'] = {} for size in range(1, 6): self.cpu_data['p1_ships']['%d_ships' % size] = self.settings['%d_ships' % size] # Setup ships. p2_ship_count = 0 rng = Random() for size in range(1, 6): count = 0 # Setup number of ships based on value defined in game settings. for i in range(self.settings['%d_ships' % size]): while True: # Generate ship details. 
pos = (rng.randrange(self.height), rng.randrange(self. width)) direction = rng.randrange(2) # Determine if the ship needs to be randomized again. error = self.setup_ship(pos, direction, 1, p2_ship_count + count, size) if error is None: print('Placed ship ' + str(p2_ship_count + count) + ' at ' + str(pos) + ' with direction ' + str(direction) + ' with size ' + str(size)) break count += 1 # Update cumulative ship total. p2_ship_count += count else: # Player 2 is a human print('\n' * PAD_AMOUNT) # Pad previous output. Utils.box_string('Player 2\'s Turn', min_width=self.width * 4 + 5, print_string=True) # Alert Player 1 to look away. Utils.box_string('Player 1, please look away.', min_width=self.width * 4 + 5, print_string=True) sleep(self.settings['player_timer']) # Player 2 Utils.box_string('Player 2 Setup', min_width=self.width * 4 + 5, print_string=True) p2_ship_count = 0 for i in range(5): p2_ship_count = self.setup_ships(i + 1, 1, p2_ship_count) # Update both boards. self.update_board(0) self.update_board(1) # Play Phase: # In this stage, the game itself is played. Utils.box_string('Play Phase', min_width=self.width * 4 + 5, print_string=True) # Main game loop. winner = None while True: if self.settings['mine_turns'] is not None and self.turn % (self.settings['mine_turns'] * 2) == 0: self.p1_mines += 1 self.p2_mines += 1 if self.turn % 2 == 0: if self.p1_turn(): winner = 1 break else: if self.p2_turn(): winner = 2 break self.turn += 1 # Print winner. Utils.box_string('Player %d won!' % winner, min_width=self.width * 4 + 5, print_string=True) return winner def create_game(gm): """ Configure and create a game. Creates a game with base settings equivalent to one of the default presets. Allows user to customize the settings before starting the game. Parameters ---------- gm : int Game type to replicate: 0: Normal mode. 1: Advanced mode. Returns ------- BattleshipGame Game instance with user-chosen settings. """ print('\n' * PAD_AMOUNT) # Pad previous output. 
# Choose and print default settings. if gm == 0: Utils.box_string('Normal Mode', print_string=True) settings = normal_mode_preset elif gm == 1: Utils.box_string('Advanced Mode', print_string=True) settings = advanced_mode_preset else: # TODO: REMOVE TESTING MODE Utils.box_string('Testing Mode', print_string=True) settings = testing_preset # Print current settings. Utils.print_settings(settings) # Change settings, if applicable. if Utils.num_input('Would you like to change the settings?', 'No', 'Yes') == 1: while True: # Determine which setting group to modify. setting = Utils.num_input('Settings', 'Grid Size', 'Ship Amount', 'Special Abilities', 'Game Type', 'Exit') # Modify setting groups. if setting == 0: # Grid Size # Take grid dimensions. settings['width'] = int(Utils.string_input('Grid Width (5-26)', condition=r'^[5-9]$|^1[0-9]$|^2[0-6]$')) settings['height'] = int(Utils.string_input('Grid Height (5-26)', condition=r'^[5-9]$|^1[0-9]$|^2[0-6]$')) elif setting == 1: # Ship Amount while True: # Take ship amounts. settings['5_ships'] = int(Utils.string_input('5-Long Ships (0-9)', condition=r'[0-9]')) settings['4_ships'] = int(Utils.string_input('4-Long Ships (0-9)', condition=r'[0-9]')) settings['3_ships'] = int(Utils.string_input('3-Long Ships (0-9)', condition=r'[0-9]')) settings['2_ships'] = int(Utils.string_input('2-Long Ships (0-9)', condition=r'[0-9]')) settings['1_ships'] = int(Utils.string_input('1-Long Ships (0-9)', condition=r'[0-9]')) # Test if ship amounts are valid. count = settings['5_ships'] + settings['4_ships'] + settings['3_ships'] + settings['2_ships'] + settings['1_ships'] if count == 0: Utils.box_string('You must have at least one ship!', print_string=True) elif count > 26: Utils.box_string('You have put in too many ships! 
(max 26)', print_string=True) elif settings['5_ships'] * 5 + settings['4_ships'] * 4 + settings['3_ships'] * 3 + settings['2_ships'] * 2 + settings['1_ships'] > settings['width'] * settings['height']: Utils.box_string('Your ships will not fit inside of the board!', print_string=True) else: break elif setting == 2: # Special Abilities # Take abilities. settings['allow_moves'] = Utils.num_input('Ship Moving', 'Enable', 'Disable') == 0 if settings['allow_moves']: settings['allow_mines'] = Utils.num_input('Mines', 'Enable', 'Disable') == 0 settings['mine_turns'] = int(Utils.string_input('Turns Between Mines', condition=r'\d+')) if settings['allow_mines'] else None elif setting == 3: # Game Type # Take game type. settings['p_type'] = ['CPU', 'Player'][Utils.num_input('Game Type', 'CPU', 'Player')] # Print updated settings. Utils.print_settings(settings) if setting == 4: # Exit break return BattleshipGame(settings) # Test if code is run independently or on repl.it. if __name__ == '__main__' or __name__ == 'builtins': print('\n' * PAD_AMOUNT) # Pad previous output. Utils.box_string('Welcome to Battleship!', print_string=True) passed_settings = None while True: # Create game. gamemode = Utils.num_input('Which gamemode do you want to play?', 'Normal', 'Advanced', 'testing') # TODO: REMOVE TESTING MODE if passed_settings is not None: bs = BattleshipGame(passed_settings) else: bs = create_game(gamemode) passed_settings = bs.settings # Play game. bs.start_game() # Determine if the game should be played again. again = Utils.num_input('Do you want to play again?', 'Yes [Same Settings]', 'Yes [Different Settings]', 'No') if again == 0: pass elif again == 1: passed_settings = None else: break
battleship.py
62,208
Class that handles game execution and running. Controls game setup based off of a certain settings preset. Handles all input and output for the game. Attributes ---------- settings : dict Settings that the game is running based off of. height : int Height of the grids used for the game. width : int Width of the grids used for the game. p1_grid : list Two dimensional list of ints containing player 1's board. p1_grid_2 : list Two dimensional list of ints containing player 1's guesses. p1_ships : list List of player 1's ship dicts with position, direction, and size data. p2_grid : list Two dimensional list of ints containing player 2's board. p2_grid_2 : list Two dimensional list of ints containing player 2's guesses. p2_ships : list List of player 2's ship dicts with position, direction, and size data. p2_cpu : bool True if player 2 is not a human player, False otherwise. turn : int Current turn number. p1_mines : int Current amount of mines available to Player 1. p2_mines : int Current amount of mines available to Player 2. p1_move : str Return message to display to Player 2 on their turn. p2_move : str Return message to display to Player 1 on their turn. Utility class used for getting input and other common functions. Contains many functions to save space by condensing input and custom string formatting methods into one place. All methods are static, and do not modify parameters in-place. Constructor for the BattleshipGame class. Parameters ---------- settings : dict Settings to create the game based off of. Place a string into an ASCII box. The result is placed inside of a ASCII box consisting of '+' characters for the corners and '-' characters for the edges. Parameters ---------- string : str String to be boxed. min_width : int, optional Specifies that the box be of a certain minimum width. Defaults to input string width. print_string : bool, optional If True, prints the string after building it. Defaults to False. 
Returns ------- str Input string with a box around it. Configure and create a game. Creates a game with base settings equivalent to one of the default presets. Allows user to customize the settings before starting the game. Parameters ---------- gm : int Game type to replicate: 0: Normal mode. 1: Advanced mode. Returns ------- BattleshipGame Game instance with user-chosen settings. Take user-input in coordinate form. The input question will be repeated until valid input is given. The input must be a valid coordinate in battleship form (r'[A-Z]\d+'). The input coordinate must be inside of the grid defined by height and width. Parameters ---------- height : int Specifies the height of the grid. width : int Specifies the width of the grid. question : str, optional String to be displayed as the input question. Will be boxed with Utils#box_string before printing. Defaults to 'Enter a Position'. Returns ------- tuple Contains the following: int Height-aligned position (y-position) of input. int Width-aligned position (x-position) of input. Take user input based on several different options. The input question will be repeated until valid input is given. The choices will be displayed in order with a number next to them indicating their id. Responses can be given as the choice id or the full choice name. Parameters ---------- question : str String to be displayed as the input question. Will be boxed with Utils#box_string before printing. *choices : *str Options for the user to choose from. Returns ------- int Number of the answer choice, corresponding to the index of the choice in *choices. Execute a turn for Player 1. Handles input and output for the turn and updates both player's grids. Returns ------- bool True if game ends after the move, False otherwise Execute a turn for Player 2. Handles input and output for the turn and updates both player's grids. Returns ------- bool True if game ends after the move, False otherwise Pretty-print the current boards of a player. 
Prints both boards for a player, along with coordinate references, titles, and boxes around the grids. Parameters ---------- player : int Determines which player's grids to print. Zero-indexed. Returns ------- str Same as the string that is printed. Pretty-print a settings dictionary. Parameters ---------- settings : dict The settings dictionary to pretty-print. Returns ------- None Create a ship. Creates a ship dictionary based on positional, directional, player, and size data and tests if placement is legal. Parameters ---------- pos : tuple (y,x) coordinate pair of top-left corner of the ship. direction : int Determines the direction of the ship: 0: Horizontal. 1: Vertical. player : int Determines which player to assign the ship to. Zero-indexed. count : int Current ship count for internal tracking use. size : int Length of the ship. Returns ------- str Error string if an error occurred, None otherwise. Setup all the ships of a particular size for a certain player. Sets up all of the length-n size ships for a player. Count is not updated in-place. Parameters ---------- size : int Length of the ships. player : int Determines which player to assign the ships to. Zero-indexed. count : int Current ship count for internal tracking use. Returns ------- int The updated cumulative ship count. Start a new game. Starts a game with the settings provided in the constructor. All game code is contained here, with relevant helper methods also called here. Every game has two stages: Setup and Play. Returns ------- int Winning player's number. Zero-indexed. Take string-based user input. The input question will be repeated until valid input is given, determined by the condition regex. Parameters ---------- question : str String to be displayed as the input question. Will be boxed with Utils#box_string before printing. condition : r-string, optional Regex to test input string off of. Returns ------- str Input string. Update both grids for a player. 
Adds new ships and puts them into the right locations. Parameters ---------- player : int Determines which player's grids to print. Zero-indexed. !/usr/bin/env python3 Default game presets. Miscellaneous global values. Global user-variables. Parameters. String builder. Print and return result. Print question and ask for input. Test whether input is an integer or string. Determine if input integer corresponds to one of the answer choices. Determine if input string is one of the answer choices. Print question and ask for input. Test if input is valid. Print the question and ask for input. Test if input is a valid coordinate and is in the grid. Grid attributes. Player 1 grids. Player 2 grids. Miscellaneous attributes. CPU attributes. Place ships into grid, if not already. Player 1 Player 2 Characters to use while printing. 0:Null, 1-26:Ships, 27:Hit, 28:Miss, 29:Mine Update board. Get boards to print. Player 1 Player 2 Build header. Build x-coordinate reference. Build y-coordinate reference and grid. Print and return result. Test if the ship does not overlap another ship. Player 1 Player 2 Catch if ship would be placed out-of-bounds. Create the ship's dictionary and append it to the player's ship list. Setup number of ships based on value defined in game settings. Print current board for player reference. Take ship details from player. Determine if the ship needs to be inputted again. Return updated cumulative ship total. Pad previous output. Test if Player 2 is a human. Alert Player 2 to look away. Notify player if a ship moved. Determine input method based on possible actions. Fire Missile Update ship. Test if ship still stands. Update grid. Update grid. Move Ship Up or down. Check if another ship is there. Check if another ship is there. Update board positions Left or right. Check if another ship is there. Check if another ship is there. Update board positions. Update board positions again, just in case. Clear Misses Clear Hits Place Mine Update ship. 
Test if ship still stands. Update grid. Update grid. End turn. Detect if game is over. Pad previous output. Test if Player 2 is a human. Player is a human Alert Player 1 to look away. Determine input method based on possible actions. Fire Missile Update ship. Test if ship still stands. Update grid. Update grid. Move Ship Up or down. Check if another ship is there. Check if another ship is there. Update board positions Left or right. Check if another ship is there. Check if another ship is there. Update board positions Update board positions again, just in case. Clear Misses Clear Hits Place Mine Update ship. Test if ship still stands. Update grid. Update grid. Player is CPU Alert Player 1 of CPU turn. Update ship. Test if ship still stands. Update grid. Update grid. End turn. Detect if game is over. Setup Phase: In this stage, both players choose where to place their ships. Pad previous output. Test if Player 2 is a human. Alert Player 2 to look away. Player 1 Test if Player 2 is a human. Player 2 is CPU Setup CPU data. Setup ships. Setup number of ships based on value defined in game settings. Generate ship details. Determine if the ship needs to be randomized again. Update cumulative ship total. Player 2 is a human Pad previous output. Alert Player 1 to look away. Player 2 Update both boards. Play Phase: In this stage, the game itself is played. Main game loop. Print winner. Pad previous output. Choose and print default settings. TODO: REMOVE TESTING MODE Print current settings. Change settings, if applicable. Determine which setting group to modify. Modify setting groups. Grid Size Take grid dimensions. Ship Amount Take ship amounts. Test if ship amounts are valid. Special Abilities Take abilities. Game Type Take game type. Print updated settings. Exit Test if code is run independently or on repl.it. Pad previous output. Create game. TODO: REMOVE TESTING MODE Play game. Determine if the game should be played again.
10,164
en
0.846345
#!/usr/bin/python # # Copyright 2018-2020 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import polyaxon_sdk from marshmallow import fields, validates_schema from polyaxon.schemas.base import BaseCamelSchema, BaseConfig from polyaxon.schemas.fields.docker_image import validate_image from polyaxon.schemas.fields.ref_or_obj import RefOrObject POLYAXON_DOCKERFILE_NAME = "Dockerfile" POLYAXON_DOCKER_WORKDIR = "/code" POLYAXON_DOCKER_SHELL = "/bin/bash" class DockerfileTypeSchema(BaseCamelSchema): image = RefOrObject(fields.Str(), required=True) env = RefOrObject(fields.Dict(keys=fields.Str(), allow_none=True)) path = RefOrObject(fields.List(fields.Str(), allow_none=True)) copy = RefOrObject(fields.List(fields.Str(), allow_none=True)) run = RefOrObject(fields.List(fields.Str(), allow_none=True)) lang_env = RefOrObject(fields.Str(allow_none=True)) uid = RefOrObject(fields.Int(allow_none=True)) gid = RefOrObject(fields.Int(allow_none=True)) filename = RefOrObject(fields.Str(allow_none=True)) workdir = RefOrObject(fields.Str(allow_none=True)) workdir_path = RefOrObject(fields.Str(allow_none=True)) shell = RefOrObject(fields.Str(allow_none=True)) @staticmethod def schema_config(): return V1DockerfileType @validates_schema def validate_dockerfile(self, data, **kwargs): validate_image(data.get("image")) class V1DockerfileType(BaseConfig, polyaxon_sdk.V1DockerfileType): IDENTIFIER = "dockerfile" SCHEMA = DockerfileTypeSchema REDUCED_ATTRIBUTES = [ "image", "env", "path", 
"copy", "run", "langEnv", "uid", "gid", "filename", "workdir", "workdirPath", "shell", ] @property def filename(self): return ( self._filename if self._filename is not None else POLYAXON_DOCKERFILE_NAME ) @filename.setter def filename(self, filename): self._filename = filename @property def workdir(self): return self._workdir if self._workdir is not None else POLYAXON_DOCKER_WORKDIR @workdir.setter def workdir(self, workdir): self._workdir = workdir @property def shell(self): return self._shell if self._shell is not None else POLYAXON_DOCKER_SHELL @shell.setter def shell(self, shell): self._shell = shell @property def image_tag(self): if not self.image: return None tagged_image = self.image.split(":") if len(tagged_image) == 1: return "latest" if len(tagged_image) == 2: return "latest" if "/" in tagged_image[-1] else tagged_image[-1] if len(tagged_image) == 3: return tagged_image[-1]
core/polyaxon/schemas/types/dockerfile.py
3,317
!/usr/bin/python Copyright 2018-2020 Polyaxon, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
574
en
0.83777
import os # toolchains options ARCH='arm' CPU='cortex-m3' CROSS_TOOL='gcc' # bsp lib config BSP_LIBRARY_TYPE = None if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') if os.getenv('RTT_ROOT'): RTT_ROOT = os.getenv('RTT_ROOT') # cross_tool provides the cross compiler # EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' EXEC_PATH = r'C:\Users\XXYYZZ' elif CROSS_TOOL == 'keil': PLATFORM = 'armcc' EXEC_PATH = r'C:/Keil_v5' elif CROSS_TOOL == 'iar': PLATFORM = 'iar' EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0' if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') BUILD = 'debug' if PLATFORM == 'gcc': # toolchains PREFIX = 'arm-none-eabi-' CC = PREFIX + 'gcc' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' CXX = PREFIX + 'g++' LINK = PREFIX + 'gcc' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections' CFLAGS = DEVICE + ' -Dgcc' AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb ' LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds' CPATH = '' LPATH = '' if BUILD == 'debug': CFLAGS += ' -O0 -gdwarf-2 -g' AFLAGS += ' -gdwarf-2' else: CFLAGS += ' -O2' CXXFLAGS = CFLAGS POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' elif PLATFORM == 'armcc': # toolchains CC = 'armcc' CXX = 'armcc' AS = 'armasm' AR = 'armar' LINK = 'armlink' TARGET_EXT = 'axf' DEVICE = ' --cpu Cortex-M3 ' CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99' AFLAGS = DEVICE + ' --apcs=interwork ' LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict' CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include' LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib' CFLAGS += ' 
-D__MICROLIB ' AFLAGS += ' --pd "__MICROLIB SETA 1" ' LFLAGS += ' --library_type=microlib ' EXEC_PATH += '/ARM/ARMCC/bin/' if BUILD == 'debug': CFLAGS += ' -g -O0' AFLAGS += ' -g' else: CFLAGS += ' -O2' CXXFLAGS = CFLAGS CFLAGS += ' -std=c99' POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET' elif PLATFORM == 'iar': # toolchains CC = 'iccarm' CXX = 'iccarm' AS = 'iasmarm' AR = 'iarchive' LINK = 'ilinkarm' TARGET_EXT = 'out' DEVICE = '-Dewarm' CFLAGS = DEVICE CFLAGS += ' --diag_suppress Pa050' CFLAGS += ' --no_cse' CFLAGS += ' --no_unroll' CFLAGS += ' --no_inline' CFLAGS += ' --no_code_motion' CFLAGS += ' --no_tbaa' CFLAGS += ' --no_clustering' CFLAGS += ' --no_scheduling' CFLAGS += ' --endian=little' CFLAGS += ' --cpu=Cortex-M3' CFLAGS += ' -e' CFLAGS += ' --fpu=None' CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"' CFLAGS += ' --silent' AFLAGS = DEVICE AFLAGS += ' -s+' AFLAGS += ' -w+' AFLAGS += ' -r' AFLAGS += ' --cpu Cortex-M3' AFLAGS += ' --fpu None' AFLAGS += ' -S' if BUILD == 'debug': CFLAGS += ' --debug' CFLAGS += ' -On' else: CFLAGS += ' -Oh' LFLAGS = ' --config "board/linker_scripts/link.icf"' LFLAGS += ' --entry __iar_program_start' CXXFLAGS = CFLAGS EXEC_PATH = EXEC_PATH + '/arm/bin/' POST_ACTION = 'ielftool --bin $TARGET rtthread.bin' def dist_handle(BSP_ROOT): import sys cwd_path = os.getcwd() sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools')) from sdk_dist import dist_do_building dist_do_building(BSP_ROOT)
bsp/stm32/stm32f103-mini-system/rtconfig.py
4,004
toolchains options bsp lib config cross_tool provides the cross compiler EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR toolchains toolchains toolchains
186
en
0.585943
""" WSGI config for thirdproject project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thirdproject.settings') application = get_wsgi_application()
3_thirdproject/thirdproject/wsgi.py
401
WSGI config for thirdproject project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
218
en
0.789304
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # tar command: # tar czvf mugshots.tar.gz -T mugshot_files.txt # where the txt is generated by this script import django django.setup() from django.conf import settings from djforms.scholars.models import Presentation from djtools.fields import TODAY YEAR = int(TODAY.year) presentations = Presentation.objects.filter(date_updated__year=YEAR) # list for failed uploads #bunk = [ ] #if s.mugshot in bunk: # print s.first_name, s.last_name for prez in presentations: for presenter in prez.presenters.all(): if presenter.mugshot: print(presenter.mugshot)
djforms/scholars/tar_mugshots.py
635
! /usr/bin/env python3 -*- coding: utf-8 -*- tar command: tar czvf mugshots.tar.gz -T mugshot_files.txt where the txt is generated by this script list for failed uploadsbunk = [ ]if s.mugshot in bunk: print s.first_name, s.last_name
235
en
0.695172
from ibis.sql.compiler import DDL, DML from .compiler import quote_identifier, _type_to_sql_string import re fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))") def _is_fully_qualified(x): return bool(fully_qualified_re.search(x)) def _is_quoted(x): regex = re.compile(r"(?:`(.*)`|(.*))") quoted, _ = regex.match(x).groups() return quoted is not None class MapDQualifiedSQLStatement: def _get_scoped_name(self, obj_name, database): # noqa: F401 return obj_name class MapDDDL(DDL, MapDQualifiedSQLStatement): pass class MapDDML(DML, MapDQualifiedSQLStatement): pass class CreateDDL(MapDDDL): """Create DDL""" class DropObject(MapDDDL): def __init__(self, must_exist=True): self.must_exist = must_exist def compile(self): if_exists = '' if self.must_exist else 'IF EXISTS ' object_name = self._object_name() return 'DROP {} {}{}'.format(self._object_type, if_exists, object_name) class DropTable(DropObject): _object_type = 'TABLE' def __init__(self, table_name, database=None, must_exist=True): super().__init__(must_exist=must_exist) self.table_name = table_name self.database = database def _object_name(self): return self._get_scoped_name(self.table_name, self.database) def _format_properties(props): tokens = [] for k, v in sorted(props.items()): tokens.append(" '{}'='{}'".format(k, v)) return '(\n{}\n)'.format(',\n'.join(tokens)) class CreateTable(CreateDDL): """ Parameters ---------- table_name : str database : str """ def __init__( self, table_name, database=None ): self.table_name = table_name self.database = database @property def _prefix(self): return 'CREATE TABLE' def _create_line(self): return '{} {}'.format( self._prefix, self.table_name ) @property def pieces(self): yield self._create_line() for piece in filter(None, self._pieces): yield piece def compile(self): return '\n'.join(self.pieces) class CreateTableWithSchema(CreateTable): def __init__( self, table_name, schema, database=None, max_rows=None ): self.table_name = table_name self.database = database 
self.schema = schema self.max_rows = max_rows @property def with_params(self): return dict(max_rows=self.max_rows) @property def _pieces(self): yield format_schema(self.schema) with_stmt = ','.join([ '{}={}'.format(i, "'{}'".format(v) if isinstance(v, str) else v) for i, v in self.with_params.items() if v is not None ]) if with_stmt: yield ' WITH ({})'.format(with_stmt) class CTAS(CreateTable): """ Create Table As Select """ def __init__(self, table_name, select, database=None): self.table_name = table_name self.database = database self.select = select @property def _prefix(self): return 'CREATE TABLE' @property def _pieces(self): yield 'AS (' yield self.select.compile() yield ')' # VIEW class CreateView(CTAS): """Create a view""" def __init__(self, table_name, select, database=None): super().__init__(table_name, select, database=database) @property def _pieces(self): yield 'AS' yield self.select.compile() @property def _prefix(self): return 'CREATE VIEW' class DropView(DropTable): _object_type = 'VIEW' # USER class AlterUser(MapDDDL): """Create user""" def __init__( self, name, password=None, database=None, is_super=False, insert_access=None ): self.name = name self.password = password self.database = database self.is_super = is_super self.insert_access = insert_access @property def _params(self): if self.password is not None: yield " password='{}'".format(self.password) if self.is_super is not None: yield " is_super='{}'".format( 'true' if self.is_super else 'false' ) if self.insert_access: yield " INSERTACCESS='{}'".format(self.insert_access) @property def pieces(self): yield 'ALTER USER {} ('.format(self.name) yield ','.join(self._params) yield ')' def compile(self): return '\n'.join(self.pieces) class CreateUser(MapDDDL): """Create user""" def __init__(self, name, password, database=None, is_super=False): self.name = name self.password = password self.database = database self.is_super = is_super @property def pieces(self): yield 'CREATE USER {} 
('.format(self.name) yield " password='{}',".format(self.password) yield " is_super='{}'".format('true' if self.is_super else 'false') yield ')' def compile(self): return '\n'.join(self.pieces) class DropUser(MapDDDL): """Create user""" def __init__(self, name, database=None): self.name = name self.database = database @property def pieces(self): yield 'DROP USER {}'.format(self.name) def compile(self): return '\n'.join(self.pieces) class AlterTable(MapDDDL): def __init__(self, table, tbl_properties=None): self.table = table self.tbl_properties = tbl_properties def _wrap_command(self, cmd): return 'ALTER TABLE {}'.format(cmd) def _format_properties(self, prefix=''): tokens = [] if self.tbl_properties is not None: # tokens.append(format_tblproperties(self.tbl_properties)) pass if len(tokens) > 0: return '\n{}{}'.format(prefix, '\n'.join(tokens)) else: return '' def compile(self): props = self._format_properties() action = '{} SET {}'.format(self.table, props) return self._wrap_command(action) class RenameTable(AlterTable): def __init__(self, old_name, new_name, old_database=None, new_database=None): # if either database is None, the name is assumed to be fully scoped self.old_name = old_name self.old_database = old_database self.new_name = new_name self.new_database = new_database new_qualified_name = new_name if new_database is not None: new_qualified_name = self._get_scoped_name(new_name, new_database) old_qualified_name = old_name if old_database is not None: old_qualified_name = self._get_scoped_name(old_name, old_database) self.old_qualified_name = old_qualified_name self.new_qualified_name = new_qualified_name def compile(self): cmd = '{} RENAME TO {}'.format(self.old_qualified_name, self.new_qualified_name) return self._wrap_command(cmd) class TruncateTable(MapDDDL): _object_type = 'TABLE' def __init__(self, table_name, database=None): self.table_name = table_name self.database = database def compile(self): name = self._get_scoped_name(self.table_name, 
self.database) return 'TRUNCATE TABLE {}'.format(name) class CacheTable(MapDDDL): def __init__(self, table_name, database=None, pool='default'): self.table_name = table_name self.database = database self.pool = pool def compile(self): scoped_name = self._get_scoped_name(self.table_name, self.database) return "ALTER TABLE {} SET CACHED IN '{}'" .format( scoped_name, self.pool ) class CreateDatabase(CreateDDL): def __init__(self, name, owner=None): self.name = name self.owner = owner def compile(self): name = quote_identifier(self.name) cmd = 'CREATE DATABASE' properties = '' if self.owner: properties = '(owner=\'{}\')'.format(self.owner) return '{} {} {}'.format(cmd, name, properties) class DropDatabase(DropObject): _object_type = 'DATABASE' def __init__(self, name): super().__init__(must_exist=True) self.name = name def _object_name(self): return self.name def format_schema(schema): elements = [ _format_schema_element(name, t) for name, t in zip(schema.names, schema.types) ] return '({})'.format(',\n '.join(elements)) def _format_schema_element(name, t): return '{} {}'.format( quote_identifier(name, force=False), _type_to_sql_string(t) ) class InsertPandas(MapDDML): def __init__(self, table_name, df, insert_index=False, database=None): self.table_name = table_name self.database = database self.df = df.copy() if insert_index: self.df.reset_index(inplace=True) def _get_field_names(self): return ','.join(self.df.columns) def _get_value(self, v): if isinstance(v, str): return "'{}'".format(v) elif v is None: return 'NULL' else: return '{}'.format(v) def _get_field_values(self): for i, row in self.df[self.df.columns].iterrows(): yield [self._get_value(v) for v in row] @property def pieces(self): cmd = 'INSERT INTO' fields = self._get_field_names() stmt = '{0} {1} ({2}) VALUES '.format( cmd, self.table_name, fields ) for values in self._get_field_values(): yield '{} ({});'.format(stmt, ','.join(values)) def compile(self): return '\n'.join(self.pieces) def 
_mapd_input_signature(inputs): # TODO: varargs '{}...'.format(val) return ', '.join(map(_type_to_sql_string, inputs))
ibis/mapd/ddl.py
9,981
Create user Create Table As Select Create DDL Parameters ---------- table_name : str database : str Create user Create a view Create user noqa: F401 VIEW USER tokens.append(format_tblproperties(self.tbl_properties)) if either database is None, the name is assumed to be fully scoped TODO: varargs '{}...'.format(val)
318
en
0.386356
# this script finds all the intersecting tiles for a given input AOI, and then downloads corresponding # 0.5 meter AHN3 DSM and DTM tiles from shapely.geometry import Polygon import geopandas as gpd import pandas as pd from tqdm import tqdm from multiprocessing import Pool import urllib.request import zipfile import os import argparse def get_intersecting_tile_names(bounds_csv_path, aoi_path): print("Finding all the intersecting tile names") # all the tile bounds are in EPSG 28992 # reproject the aoi bounds to EPSG 28992 # define aoi bounds aoi_df = gpd.read_file(aoi_path) if aoi_df.crs != 28992: aoi_df = aoi_df.to_crs(epsg=28992) tile_names_list = [] # read csv into dataframe tiles_bounds_df = pd.read_csv(bounds_csv_path) for i in tqdm(range(len(tiles_bounds_df))): tile_name = tiles_bounds_df["tile_name"].iloc[i] tile_left = tiles_bounds_df["left"].iloc[i] tile_right = tiles_bounds_df["right"].iloc[i] tile_bottom = tiles_bounds_df["bottom"].iloc[i] tile_top = tiles_bounds_df["top"].iloc[i] # generate shapely geometry tile_poly = gpd.GeoSeries( [ Polygon( [ (tile_left, tile_bottom), (tile_right, tile_bottom), (tile_right, tile_top), (tile_left, tile_top), ] ) ] ) tile_df = gpd.GeoDataFrame( {"geometry": tile_poly, "df1": [1]}, crs="EPSG:28992" ) if aoi_df.intersects(tile_df).any(): tile_names_list.append(tile_name) print("the intersecting tiles are ", tile_names_list) return tile_names_list def download_data(download_url, out_path): urllib.request.urlretrieve(download_url, out_path) def extract_zip(src_zip_file, out_dir): zip_name = src_zip_file.split("/")[-1].replace(".zip", "") zip_data = zipfile.ZipFile(src_zip_file) zipinfos = zip_data.infolist() # iterate through each file os.chdir(out_dir) for zipinfo in zipinfos: # Rename the zip content zipinfo.filename = "{}.tif".format(zip_name) zip_data.extract(zipinfo) os.remove(os.path.join(os.path.join(os.getcwd(), "{}.zip".format(zip_name)))) return os.path.join(out_dir, "{}.tif".format(zip_name)) def 
download_and_extract(tile_name, out_dir, download_url): try: out_path = os.path.join(out_dir, "{}.zip".format(tile_name)) download_data(download_url, out_path) tif_path = extract_zip(out_path, out_dir) # return tif_path except Exception as e: print("some error in ", tile_name) print("error ", e) def download_tiles_multiprocess(tile_names_list, out_dir, num_processes): download_task_list = [] dsm_dir = os.path.join(out_dir, "dsm") os.makedirs(dsm_dir, exist_ok=True) dtm_dir = os.path.join(out_dir, "dtm") os.makedirs(dtm_dir, exist_ok=True) for tile_name in tile_names_list: dsm_url = "https://download.pdok.nl/rws/ahn3/v1_0/05m_dsm/R_{}.ZIP".format( tile_name ) dtm_url = "https://download.pdok.nl/rws/ahn3/v1_0/05m_dtm/M_{}.ZIP".format( tile_name ) download_task_list.append([tile_name, dsm_dir, dsm_url]) download_task_list.append([tile_name, dtm_dir, dtm_url]) print("Dowloding {} tiles".format(len(download_task_list))) p = Pool(num_processes) p.starmap(download_and_extract, download_task_list) p.close() p.join() if __name__ == "__main__": parser = argparse.ArgumentParser( description="Download AHN3 DSM and DTM tiles for input AOI" ) parser.add_argument("--aoi", help="aoi geojson/shpefile path string") parser.add_argument( "--out_dir", help="path to out directory where files will be downloaded", type=str, default="downloaded_tiles", ) parser.add_argument( "--num_processes", help="Number of processes to run in parallel, to speed up downloading", type=int, default=10, ) args = parser.parse_args() aoi_path = args.aoi out_dir = args.out_dir num_processes = args.num_processes os.makedirs(out_dir, exist_ok=True) bounds_csv_path = "resources/ahn3_tile_bounds.csv" target_tile_names = get_intersecting_tile_names(bounds_csv_path, aoi_path) download_tiles_multiprocess(target_tile_names, out_dir, num_processes) print("Data downloaded at ", os.path.join(os.getcwd(), out_dir))
download_ahn3_elevation_data.py
4,625
this script finds all the intersecting tiles for a given input AOI, and then downloads corresponding 0.5 meter AHN3 DSM and DTM tiles all the tile bounds are in EPSG 28992 reproject the aoi bounds to EPSG 28992 define aoi bounds read csv into dataframe generate shapely geometry iterate through each file Rename the zip content return tif_path
347
en
0.710399
import boto3 from queuing_hub.conn.base import BasePub, BaseSub class AwsBase(): def __init__(self, profile_name=None): session = boto3.Session(profile_name=profile_name) self._client = session.client('sqs') self._queue_list = self._client.list_queues()['QueueUrls'] class AwsPub(AwsBase, BasePub): def __init__(self, profile_name=None): AwsBase.__init__(self, profile_name=profile_name) BasePub.__init__(self) @property def topic_list(self) -> list: return self._queue_list def push(self, topic: str, body: str) -> dict: response = self._client.send_message( QueueUrl=topic, MessageBody=body ) return response['MessageId'] class AwsSub(AwsBase, BaseSub): ATTRIBUTE_NAMES = [ 'ApproximateNumberOfMessages', # 'ApproximateNumberOfMessagesDelayed', # 'ApproximateNumberOfMessagesNotVisible', # 'DelaySeconds', # 'MessageRetentionPeriod', # 'ReceiveMessageWaitTimeSeconds', # 'VisibilityTimeout' ] def __init__(self, profile_name=None): AwsBase.__init__(self, profile_name=profile_name) BaseSub.__init__(self) @property def sub_list(self) -> list: return self._queue_list def qsize(self, sub_list: list = None) -> dict: response = {'aws': {}} if not sub_list: sub_list = self._queue_list for sub in sub_list: response['aws'][sub] = self._get_message_count(sub) return response def is_empty(self, sub: str) -> bool: return self._get_message_count(sub) == 0 def purge(self, sub: str) -> None: self._client.purge_queue(QueueUrl=sub) def pull(self, sub: str, max_num: int = 1, ack: bool = False) -> list: response = self._client.receive_message( QueueUrl=sub, MaxNumberOfMessages=max_num ) messages = response.get('Messages') if ack and messages: self._ack(sub, messages) return [message.get('Body') for message in messages] def _ack(self, sub: str, messages: list) -> None: receipt_handle_list = \ [message['ReceiptHandle'] for message in messages] for receipt_handle in receipt_handle_list: self._client.delete_message( QueueUrl=sub, ReceiptHandle=receipt_handle ) def _get_message_count(self, sub: str) 
-> int: attributes = self._get_attributes(sub, self.ATTRIBUTE_NAMES) return int(attributes[self.ATTRIBUTE_NAMES[0]]) def _get_attributes(self, sub: str, attribute_names: str) -> dict: response = self._client.get_queue_attributes( QueueUrl=sub, AttributeNames=attribute_names ) return response['Attributes']
queuing_hub/conn/aws.py
2,872
'ApproximateNumberOfMessagesDelayed', 'ApproximateNumberOfMessagesNotVisible', 'DelaySeconds', 'MessageRetentionPeriod', 'ReceiveMessageWaitTimeSeconds', 'VisibilityTimeout'
173
en
0.114217
from django.test import TestCase from django.urls import reverse from rest_framework.test import APIClient from rest_framework import status from core.models import Recipe, Ingredient RECIPE_URL = reverse('recipe:recipe-list') def recipe_url(id): """Construct URL for a single recipe based on its ID""" return reverse('recipe:recipe-detail', args=[id]) def create_sample_recipe(**params): """Helper function to create a user""" return Recipe.objects.create(**params) class RecipeAPITests(TestCase): def setUp(self): self.client = APIClient() def test_create_recipe_with_ingredients(self): """Test creating a recipe including ingredients""" payload = { 'name': 'Vegan Roast Dinner', 'description': 'Roasted potatoes and mushroom wellington' ' with vegetables and gravy.', 'ingredients': [ {'name': 'carrots'}, {'name': 'potatoes'}, {'name': 'mushrooms'}, ] } response = self.client.post(RECIPE_URL, payload, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( payload['name'], Recipe.objects.get(id=response.data['id']).name ) self.assertEquals( len(response.data['ingredients']), len(payload['ingredients']) ) def test_get_recipes(self): """Test retrieving a recipe""" create_sample_recipe( name='Roast Dinner', description='Roasted potatoes and chicken' ' with vegetables and gravy.' ) create_sample_recipe( name='Beans on Toast', description='Just the best.' ) response = self.client.get(RECIPE_URL) recipes = Recipe.objects.all().order_by('-name') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data), len(recipes)) def test_get_recipe(self): """Test retrieving a single recipe using name as filter""" test_recipe_name = 'Beans on Toast' create_sample_recipe( name='Roast Dinner', description='Roasted potatoes and chicken' ' with vegetables and gravy.' ) create_sample_recipe( name=test_recipe_name, description='Just the best recipe.' 
) response = self.client.get(RECIPE_URL, {'name': test_recipe_name}) recipes = Recipe.objects.all().order_by('-name') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertNotEqual(len(response.data), len(recipes)) self.assertEqual(response.data[0]['name'], test_recipe_name) def test_update_recipe(self): """Test updating a recipe""" self.recipe = create_sample_recipe( name='Roast Dinner', description='Roasted potatoes and chicken' ' with vegetables and gravy.' ) payload = { 'name': 'Vegan Roast Dinner', 'description': 'Roasted potatoes and mushroom wellington' ' with vegetables and gravy.' } response = self.client.patch( recipe_url(self.recipe.id), payload, format='json' ) self.recipe.refresh_from_db() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(self.recipe.name, response.data['name']) self.assertEqual(self.recipe.description, response.data['description']) def test_delete_recipe(self): """Test deleting a recipe""" self.recipe = create_sample_recipe( name='Carrot Cake', description='Sponge cake with hella carrots.' ) response = self.client.delete( recipe_url(self.recipe.id), format='json' ) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertFalse(Recipe.objects.all()) def test_get_recipes_with_ingredients(self): """Test retrieving a recipe including ingredients""" self.recipe = create_sample_recipe( name='Carrot Cake', description='Sponge cake with hella carrots.' 
) Ingredient.objects.create(name='Carrots', recipe=self.recipe) Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe) response = self.client.get(RECIPE_URL) ingredients = Ingredient.objects.all() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEquals( len(response.data[0]['ingredients']), len(ingredients) ) def test_update_recipe_ingredients(self): """Test updating a recipe with ingredients included""" self.recipe = create_sample_recipe( name='Roast Dinner', description='Roasted potatoes and chicken' ' with vegetables and gravy.' ) payload = { 'name': 'Vegan Roast Dinner', 'description': 'Roasted potatoes and mushroom wellington' ' with vegetables and gravy.', 'ingredients': [ {'name': 'carrots'}, {'name': 'potatoes'}, {'name': 'mushrooms'}, ] } response = self.client.patch( recipe_url(self.recipe.id), payload, format='json' ) self.recipe.refresh_from_db() ingredients = Ingredient.objects.all() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(ingredients), len(payload['ingredients'])) self.assertEqual(ingredients[0].recipe.name, payload['name']) def test_delete_recipe_with_ingredients(self): """Test deleting a recipe with ingredients included""" self.recipe = create_sample_recipe( name='Carrot Cake', description='Sponge cake with hella carrots.' ) Ingredient.objects.create(name='Carrots', recipe=self.recipe) Ingredient.objects.create(name='Icing Sugar', recipe=self.recipe) response = self.client.delete( recipe_url(self.recipe.id), format='json' ) ingredients = Ingredient.objects.all() self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertFalse(Recipe.objects.all()) self.assertFalse(len(ingredients), 0)
app/recipe/tests/test_recipe_api.py
6,633
Helper function to create a user Construct URL for a single recipe based on its ID Test creating a recipe including ingredients Test deleting a recipe Test deleting a recipe with ingredients included Test retrieving a single recipe using name as filter Test retrieving a recipe Test retrieving a recipe including ingredients Test updating a recipe Test updating a recipe with ingredients included
396
en
0.908645
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pyignite import Client from pyignite.datatypes.cache_config import CacheMode from pyignite.datatypes.prop_codes import * from pyignite.exceptions import SocketError nodes = [ ('127.0.0.1', 10800), ('127.0.0.1', 10801), ('127.0.0.1', 10802), ] client = Client(timeout=4.0) client.connect(nodes) print('Connected') my_cache = client.get_or_create_cache({ PROP_NAME: 'my_cache', PROP_CACHE_MODE: CacheMode.PARTITIONED, PROP_BACKUPS_NUMBER: 2, }) my_cache.put('test_key', 0) test_value = 0 # abstract main loop while True: try: # do the work test_value = my_cache.get('test_key') or 0 my_cache.put('test_key', test_value + 1) except (OSError, SocketError) as e: # recover from error (repeat last command, check data # consistency or just continue − depends on the task) print('Error: {}'.format(e)) print('Last value: {}'.format(test_value)) print('Reconnecting') # Connected # Error: Connection broken. # Last value: 2650 # Reconnecting # Error: Connection broken. # Last value: 10204 # Reconnecting # Error: Connection broken. # Last value: 18932 # Reconnecting # Traceback (most recent call last): # ... # pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes.
examples/failover.py
2,068
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. abstract main loop do the work recover from error (repeat last command, check data consistency or just continue − depends on the task) Connected Error: Connection broken. Last value: 2650 Reconnecting Error: Connection broken. Last value: 10204 Reconnecting Error: Connection broken. Last value: 18932 Reconnecting Traceback (most recent call last): ... pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes.
1,178
en
0.816688
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs

__all__ = [
    'GetApplicationGatewayResult',
    'AwaitableGetApplicationGatewayResult',
    'get_application_gateway',
]


@pulumi.output_type
class GetApplicationGatewayResult:
    """
    Application gateway resource.
    """
    def __init__(__self__, authentication_certificates=None, autoscale_configuration=None, backend_address_pools=None, backend_http_settings_collection=None, custom_error_configurations=None, enable_fips=None, enable_http2=None, etag=None, firewall_policy=None, force_firewall_policy_association=None, frontend_ip_configurations=None, frontend_ports=None, gateway_ip_configurations=None, http_listeners=None, id=None, identity=None, location=None, name=None, operational_state=None, private_endpoint_connections=None, private_link_configurations=None, probes=None, provisioning_state=None, redirect_configurations=None, request_routing_rules=None, resource_guid=None, rewrite_rule_sets=None, sku=None, ssl_certificates=None, ssl_policy=None, ssl_profiles=None, tags=None, trusted_client_certificates=None, trusted_root_certificates=None, type=None, url_path_maps=None, web_application_firewall_configuration=None, zones=None):
        # Validate and store every argument.  The generator originally emitted
        # one copy-pasted isinstance check per field (~110 lines); a data table
        # keeps the identical checks and error messages in one place.
        checks = (
            ('authentication_certificates', authentication_certificates, list),
            ('autoscale_configuration', autoscale_configuration, dict),
            ('backend_address_pools', backend_address_pools, list),
            ('backend_http_settings_collection', backend_http_settings_collection, list),
            ('custom_error_configurations', custom_error_configurations, list),
            ('enable_fips', enable_fips, bool),
            ('enable_http2', enable_http2, bool),
            ('etag', etag, str),
            ('firewall_policy', firewall_policy, dict),
            ('force_firewall_policy_association', force_firewall_policy_association, bool),
            ('frontend_ip_configurations', frontend_ip_configurations, list),
            ('frontend_ports', frontend_ports, list),
            ('gateway_ip_configurations', gateway_ip_configurations, list),
            ('http_listeners', http_listeners, list),
            ('id', id, str),
            ('identity', identity, dict),
            ('location', location, str),
            ('name', name, str),
            ('operational_state', operational_state, str),
            ('private_endpoint_connections', private_endpoint_connections, list),
            ('private_link_configurations', private_link_configurations, list),
            ('probes', probes, list),
            ('provisioning_state', provisioning_state, str),
            ('redirect_configurations', redirect_configurations, list),
            ('request_routing_rules', request_routing_rules, list),
            ('resource_guid', resource_guid, str),
            ('rewrite_rule_sets', rewrite_rule_sets, list),
            ('sku', sku, dict),
            ('ssl_certificates', ssl_certificates, list),
            ('ssl_policy', ssl_policy, dict),
            ('ssl_profiles', ssl_profiles, list),
            ('tags', tags, dict),
            ('trusted_client_certificates', trusted_client_certificates, list),
            ('trusted_root_certificates', trusted_root_certificates, list),
            ('type', type, str),
            ('url_path_maps', url_path_maps, list),
            ('web_application_firewall_configuration', web_application_firewall_configuration, dict),
            ('zones', zones, list),
        )
        for arg_name, value, expected_type in checks:
            # Same semantics as the generated code: only truthy values are
            # type-checked; the value is stored either way.
            if value and not isinstance(value, expected_type):
                raise TypeError(f"Expected argument '{arg_name}' to be a {expected_type.__name__}")
            pulumi.set(__self__, arg_name, value)

    @property
    @pulumi.getter(name="authenticationCertificates")
    def authentication_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]:
        """Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "authentication_certificates")

    @property
    @pulumi.getter(name="autoscaleConfiguration")
    def autoscale_configuration(self) -> Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse']:
        """Autoscale Configuration."""
        return pulumi.get(self, "autoscale_configuration")

    @property
    @pulumi.getter(name="backendAddressPools")
    def backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
        """Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "backend_address_pools")

    @property
    @pulumi.getter(name="backendHttpSettingsCollection")
    def backend_http_settings_collection(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]:
        """Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "backend_http_settings_collection")

    @property
    @pulumi.getter(name="customErrorConfigurations")
    def custom_error_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]:
        """Custom error configurations of the application gateway resource."""
        return pulumi.get(self, "custom_error_configurations")

    @property
    @pulumi.getter(name="enableFips")
    def enable_fips(self) -> Optional[bool]:
        """Whether FIPS is enabled on the application gateway resource."""
        return pulumi.get(self, "enable_fips")

    @property
    @pulumi.getter(name="enableHttp2")
    def enable_http2(self) -> Optional[bool]:
        """Whether HTTP2 is enabled on the application gateway resource."""
        return pulumi.get(self, "enable_http2")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="firewallPolicy")
    def firewall_policy(self) -> Optional['outputs.SubResourceResponse']:
        """Reference to the FirewallPolicy resource."""
        return pulumi.get(self, "firewall_policy")

    @property
    @pulumi.getter(name="forceFirewallPolicyAssociation")
    def force_firewall_policy_association(self) -> Optional[bool]:
        """If true, associates a firewall policy with an application gateway regardless whether the policy differs from the WAF Config."""
        return pulumi.get(self, "force_firewall_policy_association")

    @property
    @pulumi.getter(name="frontendIPConfigurations")
    def frontend_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]:
        """Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "frontend_ip_configurations")

    @property
    @pulumi.getter(name="frontendPorts")
    def frontend_ports(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]:
        """Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "frontend_ports")

    @property
    @pulumi.getter(name="gatewayIPConfigurations")
    def gateway_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]:
        """Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "gateway_ip_configurations")

    @property
    @pulumi.getter(name="httpListeners")
    def http_listeners(self) -> Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]:
        """Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "http_listeners")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
        """The identity of the application gateway, if configured."""
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource location."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="operationalState")
    def operational_state(self) -> str:
        """Operational state of the application gateway resource."""
        return pulumi.get(self, "operational_state")

    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> Sequence['outputs.ApplicationGatewayPrivateEndpointConnectionResponse']:
        """Private Endpoint connections on application gateway."""
        return pulumi.get(self, "private_endpoint_connections")

    @property
    @pulumi.getter(name="privateLinkConfigurations")
    def private_link_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayPrivateLinkConfigurationResponse']]:
        """PrivateLink configurations on application gateway."""
        return pulumi.get(self, "private_link_configurations")

    @property
    @pulumi.getter
    def probes(self) -> Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]:
        """Probes of the application gateway resource."""
        return pulumi.get(self, "probes")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """The provisioning state of the application gateway resource."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="redirectConfigurations")
    def redirect_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']]:
        """Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "redirect_configurations")

    @property
    @pulumi.getter(name="requestRoutingRules")
    def request_routing_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]:
        """Request routing rules of the application gateway resource."""
        return pulumi.get(self, "request_routing_rules")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """The resource GUID property of the application gateway resource."""
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter(name="rewriteRuleSets")
    def rewrite_rule_sets(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']]:
        """Rewrite rules for the application gateway resource."""
        return pulumi.get(self, "rewrite_rule_sets")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.ApplicationGatewaySkuResponse']:
        """SKU of the application gateway resource."""
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="sslCertificates")
    def ssl_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]:
        """SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "ssl_certificates")

    @property
    @pulumi.getter(name="sslPolicy")
    def ssl_policy(self) -> Optional['outputs.ApplicationGatewaySslPolicyResponse']:
        """SSL policy of the application gateway resource."""
        return pulumi.get(self, "ssl_policy")

    @property
    @pulumi.getter(name="sslProfiles")
    def ssl_profiles(self) -> Optional[Sequence['outputs.ApplicationGatewaySslProfileResponse']]:
        """SSL profiles of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "ssl_profiles")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="trustedClientCertificates")
    def trusted_client_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedClientCertificateResponse']]:
        """Trusted client certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "trusted_client_certificates")

    @property
    @pulumi.getter(name="trustedRootCertificates")
    def trusted_root_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']]:
        """Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "trusted_root_certificates")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="urlPathMaps")
    def url_path_maps(self) -> Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]:
        """URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits)."""
        return pulumi.get(self, "url_path_maps")

    @property
    @pulumi.getter(name="webApplicationFirewallConfiguration")
    def web_application_firewall_configuration(self) -> Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']:
        """Web application firewall configuration."""
        return pulumi.get(self, "web_application_firewall_configuration")

    @property
    @pulumi.getter
    def zones(self) -> Optional[Sequence[str]]:
        """A list of availability zones denoting where the resource needs to come from."""
        return pulumi.get(self, "zones")


class AwaitableGetApplicationGatewayResult(GetApplicationGatewayResult):
    """Awaitable wrapper so the result can be used with `await` in Pulumi programs."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetApplicationGatewayResult(
            authentication_certificates=self.authentication_certificates,
            autoscale_configuration=self.autoscale_configuration,
            backend_address_pools=self.backend_address_pools,
            backend_http_settings_collection=self.backend_http_settings_collection,
            custom_error_configurations=self.custom_error_configurations,
            enable_fips=self.enable_fips,
            enable_http2=self.enable_http2,
            etag=self.etag,
            firewall_policy=self.firewall_policy,
            force_firewall_policy_association=self.force_firewall_policy_association,
            frontend_ip_configurations=self.frontend_ip_configurations,
            frontend_ports=self.frontend_ports,
            gateway_ip_configurations=self.gateway_ip_configurations,
            http_listeners=self.http_listeners,
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            operational_state=self.operational_state,
            private_endpoint_connections=self.private_endpoint_connections,
            private_link_configurations=self.private_link_configurations,
            probes=self.probes,
            provisioning_state=self.provisioning_state,
            redirect_configurations=self.redirect_configurations,
            request_routing_rules=self.request_routing_rules,
            resource_guid=self.resource_guid,
            rewrite_rule_sets=self.rewrite_rule_sets,
            sku=self.sku,
            ssl_certificates=self.ssl_certificates,
            ssl_policy=self.ssl_policy,
            ssl_profiles=self.ssl_profiles,
            tags=self.tags,
            trusted_client_certificates=self.trusted_client_certificates,
            trusted_root_certificates=self.trusted_root_certificates,
            type=self.type,
            url_path_maps=self.url_path_maps,
            web_application_firewall_configuration=self.web_application_firewall_configuration,
            zones=self.zones)


def get_application_gateway(application_gateway_name: Optional[str] = None,
                            resource_group_name: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationGatewayResult:
    """
    Application gateway resource.

    :param str application_gateway_name: The name of the application gateway.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['applicationGatewayName'] = application_gateway_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200701:getApplicationGateway', __args__, opts=opts, typ=GetApplicationGatewayResult).value

    return AwaitableGetApplicationGatewayResult(
        authentication_certificates=__ret__.authentication_certificates,
        autoscale_configuration=__ret__.autoscale_configuration,
        backend_address_pools=__ret__.backend_address_pools,
        backend_http_settings_collection=__ret__.backend_http_settings_collection,
        custom_error_configurations=__ret__.custom_error_configurations,
        enable_fips=__ret__.enable_fips,
        enable_http2=__ret__.enable_http2,
        etag=__ret__.etag,
        firewall_policy=__ret__.firewall_policy,
        force_firewall_policy_association=__ret__.force_firewall_policy_association,
        frontend_ip_configurations=__ret__.frontend_ip_configurations,
        frontend_ports=__ret__.frontend_ports,
        gateway_ip_configurations=__ret__.gateway_ip_configurations,
        http_listeners=__ret__.http_listeners,
        id=__ret__.id,
        identity=__ret__.identity,
        location=__ret__.location,
        name=__ret__.name,
        operational_state=__ret__.operational_state,
        private_endpoint_connections=__ret__.private_endpoint_connections,
        private_link_configurations=__ret__.private_link_configurations,
        probes=__ret__.probes,
        provisioning_state=__ret__.provisioning_state,
        redirect_configurations=__ret__.redirect_configurations,
        request_routing_rules=__ret__.request_routing_rules,
        resource_guid=__ret__.resource_guid,
        rewrite_rule_sets=__ret__.rewrite_rule_sets,
        sku=__ret__.sku,
        ssl_certificates=__ret__.ssl_certificates,
        ssl_policy=__ret__.ssl_policy,
        ssl_profiles=__ret__.ssl_profiles,
        tags=__ret__.tags,
        trusted_client_certificates=__ret__.trusted_client_certificates,
        trusted_root_certificates=__ret__.trusted_root_certificates,
        type=__ret__.type,
        url_path_maps=__ret__.url_path_maps,
        web_application_firewall_configuration=__ret__.web_application_firewall_configuration,
        zones=__ret__.zones)
sdk/python/pulumi_azure_nextgen/network/v20200701/get_application_gateway.py
27,598
Application gateway resource. Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Autoscale Configuration. Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Custom error configurations of the application gateway resource. Whether FIPS is enabled on the application gateway resource. Whether HTTP2 is enabled on the application gateway resource. A unique read-only string that changes whenever the resource is updated. Reference to the FirewallPolicy resource. If true, associates a firewall policy with an application gateway regardless whether the policy differs from the WAF Config. Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Application gateway resource. :param str application_gateway_name: The name of the application gateway. :param str resource_group_name: The name of the resource group. Http listeners of the application gateway resource. 
For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Resource ID. The identity of the application gateway, if configured. Resource location. Resource name. Operational state of the application gateway resource. Private Endpoint connections on application gateway. PrivateLink configurations on application gateway. Probes of the application gateway resource. The provisioning state of the application gateway resource. Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Request routing rules of the application gateway resource. The resource GUID property of the application gateway resource. Rewrite rules for the application gateway resource. SKU of the application gateway resource. SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). SSL policy of the application gateway resource. SSL profiles of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Resource tags. Trusted client certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Resource type. URL path map of the application gateway resource. 
For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits). Web application firewall configuration. A list of availability zones denoting where the resource needs to come from. coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test
4,299
en
0.72592
from __future__ import (absolute_import, division,print_function, unicode_literals) from builtins import * import numpy as np import cv2 import SimpleITK as sitk from builtins import * from scipy.spatial import distance import sys import time ############### FUNCTIONS ########################## def imcomplement(im): if np.max(im)>1: imout=255-im else: imout=1-im return imout def mat2gray(img): max_img=np.max(img) min_img=np.min(img) imgout=(img-min_img)/(max_img-min_img) return imgout def im2double(img): imgout=img.astype('float32') imgout= mat2gray(imgout) return imgout def imreconstruct(marker,mask): markeritk=sitk.GetImageFromArray(marker) maskitk=sitk.GetImageFromArray(mask) recfilt=sitk.ReconstructionByDilationImageFilter() rectoutitk=recfilt.Execute(markeritk,maskitk) rectout=sitk.GetArrayFromImage(rectoutitk) return rectout def eigen_cov(x,y): mx=np.mean(x) my=np.mean(y) x=x-mx y=y-my cxx=np.var(x) cxy=0 cyy=np.var(y); nx=len(x) for ct in range(nx): cxy=cxy+x[ct]*y[ct]; cxy=cxy/nx; C=np.zeros((2,2)) C[0,0]=cxx C[0,1]=cxy C[1,0]=cxy C[1,1]=cyy D,V=np.linalg.eig(C) return V,D def improfile(img,x,y,n): xm=x[0] x0=x[1] ym=y[0] y0=y[1] a = np.arctan((y0 - ym) / (x0 - xm)) i=range(0,100,int(100/n)) cx=np.squeeze(np.zeros((1,len(i)))) cy=np.squeeze(np.zeros((1,len(i)))) c=np.squeeze(np.zeros((1,len(i)))) ct=0 for t in range(0,100,int(100/30)): tf=t/100.0 cx[ct] = int(xm + (x0 - xm)*tf) cy[ct] = int(ym + (y0 - ym)*tf) c[ct]=img[int(cy[ct]), int(cx[ct])] ct=ct+1 return c,cx,cy def filter_result3(img,bw_result,ths,thm): bw_result_orig=np.copy(bw_result); points=np.where(bw_result>0) points=np.reshape(points,np.shape(points)) points=np.transpose(points) npoints=np.shape(points)[0] k=20 step=5 hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV) sat=hsv[:,:,1]/255 bw_result_filter=np.zeros(np.shape(bw_result)) xc=points[:,1] yc=points[:,0] for ct in range(0,npoints,step): #print(ct/npoints) ystart=max(0,yc[ct]-k); xstart=max(0,xc[ct]-k); yend=min(np.shape(img)[0],yc[ct]+k); 
xend=min(np.shape(img)[1],xc[ct]+k); p=points[ct,:] p=np.reshape(p,(1,2)) Dpoints=distance.cdist(p,points) Dpoints=np.squeeze(Dpoints) ipoints=np.squeeze(np.where(Dpoints<40)) xneigh=points[ipoints,1]; yneigh=points[ipoints,0]; V,D=eigen_cov(xneigh,yneigh) vmin=V[:,0]; if D[1]<D[0]: vmin=V[:,1]; x1=xc[ct]-k*vmin[0]; y1=yc[ct]-k*vmin[1]; x2=xc[ct]+k*vmin[0]; y2=yc[ct]+k*vmin[1]; p,px,py=improfile(sat,np.array([x1,x2]),np.array([y1,y2]),30); s=np.abs(np.mean(p[0:5])-np.mean(p[len(p)-5:len(p)])); s=round(s*100); m=np.max([p[0:5],p[len(p)-5:len(p)]]); if(s<ths and m<thm): bw_result_filter[ystart:yend,xstart:xend]=bw_result_orig[ystart:yend,xstart:xend]; return bw_result_filter def min_openings(im,LEN,DEG_NUM): imo=[]; for i in range(DEG_NUM): #DEG=(i)*((360/DEG_NUM)/2) filtername=str(i+1)+'se.txt' se=np.loadtxt('filters/images/filters/'+filtername) if(i==0): se=np.reshape(se,(1,len(se))) if(i==6): se=np.reshape(se,(len(se),1)) se=se.astype('uint8') imoi=cv2.erode(im,se) imoi=cv2.dilate(imoi,se) imo.append(imoi) imB=imo[0] for i in range(DEG_NUM-1): k=i+1 imB=np.minimum(imB,imo[k]) return imB def smooth_cross_section(imV,LEN_diff,DEG_NUM): imV_c=imcomplement(imV) imd=[] for i in range(12): k=i+1 se1=np.loadtxt('filters/images/filters/'+str(k)+'linekernel1.txt') se2=np.loadtxt('filters/images/filters/'+str(k)+'linekernel2.txt') if(i==0): se1=np.reshape(se1,(1,len(se1))) se2=np.reshape(se2,(len(se2),1)) if(i==6): se1=np.reshape(se1,(len(se1),1)) se2=np.reshape(se2,(1,len(se2))) temp=cv2.filter2D(imV_c.astype('float32'),-1,se1) imdi=cv2.filter2D(temp,-1,se2) imdi[imdi<0]=0 imd.append(imdi) imDiff=imd[0] for i in range(11): k=i+1 imDiff=np.maximum(imDiff,imd[k]) imDiff=mat2gray(imDiff) return imDiff def reconstruction_by_dilation(im,LEN,DEG_NUM): imo=[]; for i in range(DEG_NUM): #DEG=(i)*((360/DEG_NUM)/2) filtername=str(i+1)+'se.txt' se=np.loadtxt('filters/images/filters/'+filtername) if(i==0): se=np.reshape(se,(1,len(se))) if(i==6): se=np.reshape(se,(len(se),1)) 
se=se.astype('uint8') imoi=cv2.erode(im,se) imoi=cv2.dilate(imoi,se) imo.append(imoi) imC=imo[0] for i in range(DEG_NUM-1): k=i+1 imC=np.maximum(imC,imo[k]) imC2=imreconstruct(imC,im) imC2=mat2gray(imC2) return imC2 def reconstruction_by_erosion(im,LEN,DEG_NUM): im_close=[]; for i in range(DEG_NUM): #DEG=(i)*((360/DEG_NUM)/2) filtername=str(i+1)+'se.txt' se=np.loadtxt('filters/images/filters/'+filtername) if(i==0): se=np.reshape(se,(1,len(se))) if(i==6): se=np.reshape(se,(len(se),1)) se=se.astype('uint8') im_closei=cv2.dilate(im,se) im_closei=cv2.erode(im_closei,se) im_close.append(im_closei); imTemp39=im_close[0] for i in range(DEG_NUM-1): k=i+1 imTemp39=np.minimum(imTemp39,im_close[k]) marker=imcomplement(imTemp39) mask=imcomplement(im) imF=imreconstruct(marker,mask) imF=mat2gray(imF) imF=imcomplement(imF) return imF ############ MAIN ############## if len(sys.argv)<2: print('missing input file') sys.exit(-1) if len(sys.argv)==4: img_file_out=sys.argv[2] img_file_out_bin=sys.argv[3] else: img_file_out='output.png' img_file_out_bin='output.png' img_file=sys.argv[1] print('processing '+img_file) imgorig=cv2.imread(img_file) start_time = time.time() size_orig=np.shape(imgorig) print(size_orig) ## resize if the original size is different from dataset images ## so we can keep the same parameters for the filters scale=2 rows_dataset=int(2448/scale) cols_dataset=int(3264/scale) img_blur = cv2.bilateralFilter(cv2.resize(imgorig,(cols_dataset,rows_dataset)) ,int(51/scale),int(201),int(201/scale)) img_blur=cv2.resize(img_blur,(size_orig[1],size_orig[0])) ## print("bilateral filter --- %s seconds ---" % (time.time() - start_time)) img=cv2.resize(img_blur,(653,490)) hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV) im=hsv[:,:,2] bw_mask=np.zeros(np.shape(im)) bw_mask_offr=round(np.shape(im)[0]/20) bw_mask_offc=round(np.shape(im)[1]/20) bw_mask[bw_mask_offr:np.shape(im)[0]-bw_mask_offr, bw_mask_offc:np.shape(im)[1]-bw_mask_offc]=1; im=mat2gray(im)*mat2gray(bw_mask) im=imcomplement(im) 
im=im2double(im) DEG_NUM=12; LEN_c=11; LEN_o=11; LEN_diff=7; ic1=reconstruction_by_dilation(im,LEN_c,DEG_NUM) io1=min_openings(im,LEN_o,DEG_NUM) iv=mat2gray(ic1-io1) imDiff=smooth_cross_section(iv,LEN_diff,LEN_c) imL=reconstruction_by_dilation(imDiff,LEN_c,DEG_NUM) imF=reconstruction_by_erosion(imL,LEN_c,DEG_NUM) TH_LOW=0.12; TH_HIGH=0.2; min_obj=20; min_hole=10; mask=np.zeros(np.shape(imF)) marker=np.zeros(np.shape(imF)) mask[imF>TH_LOW]=1 marker[imF>TH_HIGH]=1 bw_result=imreconstruct(marker,mask) print("bw result --- %s seconds ---" % (time.time() - start_time)) bw_result=filter_result3(img,bw_result,4,0.2) print("filter result --- %s seconds ---" % (time.time() - start_time)) bw_result=cv2.resize(bw_result,(size_orig[1],size_orig[0])) imgr=imgorig[:,:,2]; imgr[bw_result>0]=255; imgorig[:,:,2]=imgr; print('saving output file: '+img_file_out) cv2.imwrite(img_file_out,imgorig) cv2.imwrite(img_file_out_bin,bw_result*255) print('done ')
scripts/image/crack_detection_fast.py
8,422
FUNCTIONS print(ct/npoints)DEG=(i)*((360/DEG_NUM)/2)DEG=(i)*((360/DEG_NUM)/2)DEG=(i)*((360/DEG_NUM)/2) MAIN resize if the original size is different from dataset images so we can keep the same parameters for the filters
221
en
0.627533
#%% First import numpy as np import json import os import pandas as pd import requests from contextlib import closing import time from datetime import datetime from requests.models import HTTPBasicAuth import seaborn as sns from matplotlib import pyplot as plt from requests import get from requests_futures.sessions import FuturesSession from bs4 import BeautifulSoup from dotenv import load_dotenv, dotenv_values from requests_oauthlib import OAuth2, OAuth2Session #%% abspath = os.path.abspath(__file__) dname = os.path.dirname(abspath) os.chdir(dname) env_vars = dotenv_values('config.env') client_id = env_vars['id'] client_secret = env_vars['secret'] code = env_vars['code'] callback_uri = "http://localhost:8080" authorize_url = "https://www.warcraftlogs.com/oauth/authorize" token_url = "https://www.warcraftlogs.com/oauth/token" # warcraftlogs = OAuth2Session(client_id, redirect_uri=callback_uri) # authorization_url, state = warcraftlogs.authorization_url(authorize_url, # access_type="offline") # token = warcraftlogs.fetch_token(token_url = token_url, # auth = HTTPBasicAuth(client_id, client_secret), # code = code) # access_token = token['access_token'] # refresh_token = token['refresh_token'] # with open('refresh_token.env', 'w') as f: # f.write('refresh_token = '+str(refresh_token)+'\nacces_token = '+str(access_token)) if os.path.isfile('refresh_token.env'): env_vars = dotenv_values('refresh_token.env') refresh_token = env_vars['refresh_token'] access_token = env_vars['access_token'] else: raise 'Get your fresh token dumby' # print(refresh_token) try: warcraftlogs = OAuth2Session(client_id = client_id) graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client" headers = {"Authorization": f"Bearer {access_token}"} query = """{ reportData{ reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){ data{ fights(difficulty: 5){ name averageItemLevel # friendlyPlayers id } } } } }""" r = requests.post(graphql_endpoint, json={"query": query}, 
headers=headers) except: token = warcraftlogs.refresh_token(token_url = token_url, auth = HTTPBasicAuth(client_id, client_secret), refresh_token = refresh_token) access_token = token['access_token'] refresh_token = token['refresh_token'] with open('refresh_token.env', 'w') as f: f.write('refresh_token = '+str(refresh_token)+'\naccess_token = '+str(access_token)) warcraftlogs = OAuth2Session(client_id = client_id) graphql_endpoint = "https://www.warcraftlogs.com/api/v2/client" headers = {"Authorization": f"Bearer {access_token}"} query = """{ reportData{ reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){ data{ fights(difficulty: 5){ name averageItemLevel # friendlyPlayers id } } } } }""" r = requests.post(graphql_endpoint, json={"query": query}, headers=headers) with open('..//get_guild_list/guild_list_hungering.json', encoding='utf-8') as f: guilds = json.load(f) #%% def is_good_response_json(resp): """ Returns True if the response seems to be HTML, False otherwise. 
""" content_type = resp.headers['Content-Type'].lower() return (resp.status_code == 200 and content_type is not None and content_type.find('json') > -1) def get_guild_id(guild): try: guild_id = int(guild['id']) except: query = """ { guildData{ guild(name: "%s", serverSlug: "%s", serverRegion: "%s"){ id } } } """ % (guild['name'], guild['realm'].replace(' ', '-'), guild['region']) r = requests.post(graphql_endpoint, json={"query": query}, headers=headers) guild_id = r.json()['data']['guildData']['guild']['id'] return guild_id def get_log_list(guild): guild['id'] = get_guild_id(guild) query = ("{" f"reportData{{" f" reports(guildID: {guild['id']}, zoneID: 26){{" f" data{{" f" code" f" startTime" f" endTime" f" }}" f" }}" f"}}" f"}}") r = requests.post(graphql_endpoint, json={"query": query}, headers=headers) log_list = r.json()['data']['reportData']['reports']['data'] return log_list def get_log_list_apiv1(guild): with open('..//..//Warcraftlogs//api_key.txt.') as f: api_key = f.readlines()[0] link = "https://www.warcraftlogs.com:443/v1/reports/guild/" + \ guild['name'] + "/" + guild['realm'].replace(' ', '-').replace("'","")+ "/" + \ guild['region'] + "?api_key=" + api_key guild_logs = requests.get(link) log_list = guild_logs.json() log_list_new = [] for item in log_list: if item['zone'] == 26: log_list_new.append({'code': item['id'], 'startTime': item['start'], 'endTime': item['end']}) return log_list_new def get_pulls(log, guild): log_id = log['code'] query = """ { reportData{ report(code: "%s"){ fights(difficulty: 5){ name id averageItemLevel bossPercentage kill startTime endTime } } } } """ % (log_id) r = requests.post(graphql_endpoint, json={"query": query}, headers=headers) fight_list = r.json()['data']['reportData']['report']['fights'] for k in range(len(fight_list)): fight_list[k].update({'log_code': log_id}) return fight_list def get_fight_info(fight, guild, unique_id): code = fight['log_code'] fight_ID = fight['id'] start_time = fight['start_time'] 
end_time = fight['end_time'] query = """ { reportData{ report(code: "%s"){ table(fightIDs: %s, startTime: %s, endTime: %s) } } } """ % (code, fight_ID, str(start_time), str(end_time)) r = requests.post(graphql_endpoint, json={"query": query}, headers=headers) table = r.json()['data']['reportData']['report']['table']['data'] comp = table['composition'] roles = table['playerDetails'] player_list = [] for role in roles: players = roles[role] for player in players: try: gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']] ilvl = np.mean(gear_ilvl) except: try: ilvl = player['minItemLevel'] except: ilvl = np.NaN try: covenant = player['combatantInfo']['covenantID'] except: covenant = np.NaN try: spec = player['specs'][0] except: spec = np.NaN try: stats = player['combatantInfo']['stats'] primaries = ['Agility','Intellect','Strength'] for primary in primaries: if primary in stats.keys(): break primary= stats[primary]['min'] mastery= stats['Mastery']['min'] crit= stats['Crit']['min'] haste= stats['Haste']['min'] vers= stats['Versatility']['min'] stamina= stats['Stamina']['min'] except: primary = np.NaN mastery = np.NaN crit = np.NaN haste = np.NaN vers = np.NaN stamina = np.NaN player_info= {'unique_id': unique_id, 'class': player['type'], 'spec': spec, 'role': role, 'ilvl': ilvl, 'covenant': covenant, 'primary': primary, 'mastery': mastery, 'crit': crit, 'haste': haste, 'vers': vers, 'stamina': stamina, 'boss_name': fight['name']} player_list.append(player_info) return player_list # %% Setup the SQL Stuff from sqlalchemy import create_engine import psycopg2 server = 'localhost' database = 'nathria_prog' username = 'postgres' password = 'postgres' if 'conn' in locals(): conn.close() engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog') conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password) curs = conn.cursor() curs.execute("select exists(select * from 
information_schema.tables where table_name=%s)",\ ('nathria_prog_v2',)) if curs.fetchone()[0]: curs.execute('select distinct guild_name from nathria_prog_v2') already_added_guilds = [item[0] for item in curs.fetchall()] already_added_length = len(already_added_guilds) else: already_added_guilds = [] already_added_length = 0 def check_in_sql(fight): unique_id = fight['unique_id'] curs.execute("select * from nathria_prog_v2 where unique_id = '%s'" % (unique_id)) if curs.fetchone() is None: check_one = False else: check_one = True curs.execute("select * from nathria_prog_v2 where start_time > %s and end_time < %s and guild_name = '%s';" \ % (fight['start_time']-60, fight['end_time']+60, fight['guild_name'])) if curs.fetchone() is None: check_two = False else: check_two = True check = check_one or check_two return check def add_to_sql(curs, table, info): placeholders = ', '.join(['%s'] * len(info)) columns = ', '.join(info.keys()) sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (str(table), columns, placeholders) curs.execute(sql, list(info.values())) #%% This is for futures use def make_logs_query(log): log_id = log['code'] query = """ { reportData{ report(code: "%s"){ fights(difficulty: 5){ name id averageItemLevel bossPercentage kill startTime endTime } } } } """ % (log_id) return query def get_log_args(log, graphql_endpoint, headers): args = {'url': graphql_endpoint, 'json': {'query': make_logs_query(log)}, 'headers': headers} return args def get_fight_list(log_list, graphql_endpoint, headers): session = FuturesSession(max_workers = 2) futures = [session.post(**get_log_args(log, graphql_endpoint, headers)) for log in log_list] fights_list = [] for q, item in enumerate(futures): result = item.result() if result.status_code!=200: print(result.status_code) fights = result.json()['data']['reportData']['report']['fights'] for k, fight in enumerate(fights): fight['log_code'] = log_list[q]['code'] fight['log_start'] = log_list[q]['startTime'] fight['log_end'] = 
log_list[q]['endTime'] fight['unique_id'] = log_list[q]['code'] + '_' + str(fight['id']) fights_list.extend([fight]) return fights_list def get_prog_pulls(df, boss_name): if type(df.iloc[0]['start_time']) != 'int': df['start_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['start_time']] df['end_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['end_time']] kills_df = df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('kill == True') first_kill_time = min(kills_df['start_time']) return df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('start_time <= '+str(first_kill_time)) def add_pull_num(df): df = df.sort_values(by = ['start_time']) df.insert(loc = 0, column = 'pull_num', value = np.arange(len(df))+1) return df def combine_boss_df(df): boss_names = [ 'Shriekwing', \ 'Huntsman Altimor', 'Hungering Destroyer', \ "Sun King's Salvation", "Artificer Xy'mox", \ 'Lady Inerva Darkvein', \ 'The Council of Blood', \ 'Sludgefist', \ 'Stone Legion Generals', \ 'Sire Denathrius'] combine_df = pd.DataFrame() for k, boss_name in enumerate(np.unique(df['name'])): if boss_name in boss_names and boss_name in np.unique(df['name']): combine_df = combine_df.append(add_pull_num(df.copy(deep = True).query('name == "'+boss_name+'"'))) combine_df = combine_df.reset_index().drop(columns = 'index') return combine_df n_start = 3500 for gnum, guild in enumerate(guilds[n_start:]): if guild['name'] in already_added_guilds: continue # log_list = get_log_list(guild) try: log_list = get_log_list_apiv1(guild) if len(log_list) == 0: print(f'Log list empty for {guild["name"]}') fightdf = pd.DataFrame() playerdf = pd.DataFrame() print(f'Parsing guild {guild["name"]} (#{gnum+1+n_start} of {len(guilds)})') fight_list = get_fight_list(log_list, graphql_endpoint, headers) fightdf = pd.DataFrame() for q, fight in enumerate(fight_list): fight['boss_perc'] = fight.pop('bossPercentage') fight['average_item_level'] = 
fight.pop('averageItemLevel') fight['unique_id'] = fight['log_code'] + '_' + str(fight['id']) fight['start_time'] = fight.pop('startTime') fight['end_time'] = fight.pop('endTime') fight['guild_name'] = guild['name'] fight['guild_realm'] = guild['realm'] fight['guild_region'] = guild['region'] fightdf = fightdf.append(pd.DataFrame(fight, index=['i',])) fightdf = combine_boss_df(fightdf.copy(deep = True)) fightdf.to_sql('nathria_prog_v2', engine, if_exists='append') if len(fightdf)>1: print(f'Adding to SQL guild {guild["name"]}') time.sleep(3) except: continue #%% asdfasdf from sqlalchemy import create_engine import psycopg2 server = 'localhost' database = 'nathria_prog' username = 'postgres' password = 'postgres' if 'conn' in locals(): conn.close() engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog') conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password) curs = conn.cursor() curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\ ('nathria_prog_v2',)) if curs.fetchone()[0]: curs.execute('select distinct guild_name from nathria_prog_v2') logged_guilds = [item[0] for item in curs.fetchall()] else: logged_guilds = [] def make_fights_query(fight): code = fight['log_code'] fight_ID = fight['id'] start_time = fight['start_time'] end_time = fight['end_time'] query = """ { reportData{ report(code: "%s"){ table(fightIDs: %s, startTime: %s, endTime: %s) } } } """ % (code, fight_ID, str(start_time), str(end_time)) return query def get_fight_args(log, graphql_endpoint, headers): args = {'url': graphql_endpoint, 'json': {'query': make_fights_query(log)}, 'headers': headers} return args def get_fight_table(fights_list, graphql_endpoint, headers): session = FuturesSession(max_workers = 2) futures = [session.post(**get_fight_args(fight, graphql_endpoint, headers)) for fight in fights_list] fights_tables = [] for k, item in enumerate(futures): result = item.result() if 
result.status_code!=200: print(result.status_code) # if is_good_response_json(item.result()): try: fights_tables.append(result.json()['data']['reportData']['report']['table']['data']) except: pass return fights_tables def parse_fight_table(table, boss_name, unique_id, guild_name): comp = table['composition'] roles = table['playerDetails'] player_list = [] for role in roles: players = roles[role] for player in players: try: gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']] ilvl = np.mean(gear_ilvl) except: try: ilvl = player['minItemLevel'] except: ilvl = np.NaN try: covenant = player['combatantInfo']['covenantID'] except: covenant = np.NaN try: spec = player['specs'][0] except: spec = np.NaN try: stats = player['combatantInfo']['stats'] primaries = ['Agility','Intellect','Strength'] for primary in primaries: if primary in stats.keys(): break primary= stats[primary]['min'] mastery= stats['Mastery']['min'] crit= stats['Crit']['min'] haste= stats['Haste']['min'] vers= stats['Versatility']['min'] stamina= stats['Stamina']['min'] except: primary = np.NaN mastery = np.NaN crit = np.NaN haste = np.NaN vers = np.NaN stamina = np.NaN player_info= {'unique_id': unique_id, 'name': player['name'], 'guild_name': guild_name, 'server': player['server'], 'class': player['type'], 'spec': spec, 'role': role, 'ilvl': ilvl, 'covenant': covenant, 'primary': primary, 'mastery': mastery, 'crit': crit, 'haste': haste, 'vers': vers, 'stamina': stamina, 'boss_name': boss_name} player_list.append(player_info) return player_list for guild_name in logged_guilds: curs.execute(f"select * from nathria_prog_v2 where guild_name = '{guild_name}'") pulls = pd.DataFrame(curs.fetchall()) pulls.columns = [desc[0] for desc in curs.description] fights_list = pulls.to_dict('records') curs.execute(f"select distinct unique_id from nathria_prog_v2_players where guild_name = '{guild_name}'") added_fights = [item[0] for item in curs.fetchall()] fight_list = [fight for fight in 
fights_list if fight['unique_id'] not in added_fights] if len(fight_list)>1: fights_tables = get_fight_table(fights_list, graphql_endpoint, headers) playerdf = pd.DataFrame() for q, table in enumerate(fights_tables): unique_id = fights_list[q]['unique_id'] guild_name = guild_name player_info = parse_fight_table(table, fights_list[q]['name'], unique_id, guild_name) for player in player_info: for player in player_info: playerdf = playerdf.append(pd.DataFrame(player, index=['i',])) if len(playerdf)>1: print(f'Adding to SQL guild player info {guild["name"]}') playerdf.to_sql('nathria_prog_v2_players', engine, if_exists='append')
Pulling data/apiv2_pull.py
19,968
Returns True if the response seems to be HTML, False otherwise. %% First%% warcraftlogs = OAuth2Session(client_id, redirect_uri=callback_uri) authorization_url, state = warcraftlogs.authorization_url(authorize_url, access_type="offline") token = warcraftlogs.fetch_token(token_url = token_url, auth = HTTPBasicAuth(client_id, client_secret), code = code) access_token = token['access_token'] refresh_token = token['refresh_token'] with open('refresh_token.env', 'w') as f: f.write('refresh_token = '+str(refresh_token)+'\nacces_token = '+str(access_token)) print(refresh_token)%% %% Setup the SQL Stuff%% This is for futures use log_list = get_log_list(guild)%% if is_good_response_json(item.result()):
781
en
0.447057
from Tkinter import * from Tkinter import Text as textcontrol class StyledTextControl( textcontrol ): def spaces(self, val): return str(val*8) def __screen(self, width, height): self. def __init__(self, parent, width, height, fontf, fontsize): # Predefining Variables self.POS = "RIGHT" self.app = parent self.widget = textcontrol(parent.mainframe) self.widget.config(tabs=self.spaces(4), background="#ffffff", foreground='#000000', highlightthickness=0, borderwidth=0) if fontf != None: if fontsize != None and fontsize != "": self.widget.config(font=(fontf, fontsize)) else: self.widget.config(font=fontf) def setMargins(self, top, left, right, ): def pack(self): self.app.configs.append(self.__screen) self.widget.pack(side=self.POS)
lib/stc.py
919
Predefining Variables
21
en
0.183492
# Copyright 2021 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from unittest import mock sys.modules["dnf"] = mock.Mock()
tests/unit/yum_config/mock_modules.py
670
Copyright 2021 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
569
en
0.858906
from litex.soc.cores import uart from litex.soc.cores.uart import UARTWishboneBridge from litedram.frontend.bist import LiteDRAMBISTGenerator, LiteDRAMBISTChecker from litescope import LiteScopeAnalyzer from litescope import LiteScopeIO from gateware.memtest import LiteDRAMBISTCheckerScope from targets.utils import csr_map_update from targets.mimasv2.base import BaseSoC class MemTestSoC(BaseSoC): csr_peripherals = ( "analyzer", "io", ) csr_map_update(BaseSoC.csr_map, csr_peripherals) def __init__(self, platform, *args, **kwargs): kwargs['cpu_type'] = None BaseSoC.__init__(self, platform, *args, with_uart=False, **kwargs) self.add_cpu_or_bridge(UARTWishboneBridge(platform.request("serial"), self.clk_freq, baudrate=19200)) self.add_wb_master(self.cpu_or_bridge.wishbone) # Litescope for analyzing the BIST output # -------------------- self.submodules.io = LiteScopeIO(8) for i in range(8): try: self.comb += platform.request("user_led", i).eq(self.io.output[i]) except: pass analyzer_signals = [ self.spiflash.bus, # self.spiflash.cs_n, # self.spiflash.clk, # self.spiflash.dq_oe, # self.spiflash.dqi, # self.spiflash.sr, ] self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 1024) def do_exit(self, vns, filename="test/analyzer.csv"): self.analyzer.export_csv(vns, filename) SoC = MemTestSoC
targets/mimasv2/scope.py
1,581
Litescope for analyzing the BIST output -------------------- self.spiflash.cs_n, self.spiflash.clk, self.spiflash.dq_oe, self.spiflash.dqi, self.spiflash.sr,
172
en
0.201882
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. from __future__ import absolute_import, print_function from distutils.spawn import find_executable from distutils.version import LooseVersion import json import os import distro import shutil import subprocess import six import six.moves.urllib as urllib from six.moves import input from subprocess import PIPE from zipfile import BadZipfile import servo.packages as packages from servo.util import extract, download_file, host_triple def check_gstreamer_lib(): return subprocess.call(["pkg-config", "--atleast-version=1.16", "gstreamer-1.0"], stdout=PIPE, stderr=PIPE) == 0 def run_as_root(command, force=False): if os.geteuid() != 0: command.insert(0, 'sudo') if force: command.append('-y') return subprocess.call(command) def install_linux_deps(context, pkgs_ubuntu, pkgs_fedora, pkgs_void, force): install = False pkgs = [] if context.distro in ['Ubuntu', 'Debian GNU/Linux']: command = ['apt-get', 'install'] pkgs = pkgs_ubuntu if subprocess.call(['dpkg', '-s'] + pkgs, stdout=PIPE, stderr=PIPE) != 0: install = True elif context.distro in ['CentOS', 'CentOS Linux', 'Fedora']: installed_pkgs = str(subprocess.check_output(['rpm', '-qa'])).replace('\n', '|') pkgs = pkgs_fedora for p in pkgs: command = ['dnf', 'install'] if "|{}".format(p) not in installed_pkgs: install = True break elif context.distro == 'void': installed_pkgs = str(subprocess.check_output(['xbps-query', '-l'])) pkgs = pkgs_void for p in pkgs: command = ['xbps-install', '-A'] if "ii {}-".format(p) not in installed_pkgs: install = force = True break if install: print("Installing missing dependencies...") run_as_root(command + pkgs, force) return install def install_salt_dependencies(context, force): pkgs_apt = ['build-essential', 'libssl-dev', 'libffi-dev', 'python-dev'] pkgs_dnf = ['gcc', 'libffi-devel', 
'python-devel', 'openssl-devel'] pkgs_xbps = ['gcc', 'libffi-devel', 'python-devel'] if not install_linux_deps(context, pkgs_apt, pkgs_dnf, pkgs_xbps, force): print("Dependencies are already installed") def gstreamer(context, force=False): cur = os.curdir gstdir = os.path.join(cur, "support", "linux", "gstreamer") if not os.path.isdir(os.path.join(gstdir, "gst", "lib")): subprocess.check_call(["bash", "gstreamer.sh"], cwd=gstdir) return True return False def bootstrap_gstreamer(context, force=False): if not gstreamer(context, force): print("gstreamer is already set up") return 0 def linux(context, force=False): # Please keep these in sync with the packages in README.md pkgs_apt = ['git', 'curl', 'autoconf', 'libx11-dev', 'libfreetype6-dev', 'libgl1-mesa-dri', 'libglib2.0-dev', 'xorg-dev', 'gperf', 'g++', 'build-essential', 'cmake', 'libssl-dev', 'liblzma-dev', 'libxmu6', 'libxmu-dev', "libxcb-render0-dev", "libxcb-shape0-dev", "libxcb-xfixes0-dev", 'libgles2-mesa-dev', 'libegl1-mesa-dev', 'libdbus-1-dev', 'libharfbuzz-dev', 'ccache', 'clang', 'libunwind-dev', 'libgstreamer1.0-dev', 'libgstreamer-plugins-base1.0-dev', 'libgstreamer-plugins-bad1.0-dev', 'autoconf2.13', 'libunwind-dev', 'llvm-dev'] pkgs_dnf = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel', 'libunwind-devel', 'mesa-libGL-devel', 'mesa-libEGL-devel', 'glib2-devel', 'libX11-devel', 'libXrandr-devel', 'gperf', 'fontconfig-devel', 'cabextract', 'ttmkfdir', 'expat-devel', 'rpm-build', 'openssl-devel', 'cmake', 'libXcursor-devel', 'libXmu-devel', 'dbus-devel', 'ncurses-devel', 'harfbuzz-devel', 'ccache', 'clang', 'clang-libs', 'llvm', 'autoconf213', 'python3-devel', 'gstreamer1-devel', 'gstreamer1-plugins-base-devel', 'gstreamer1-plugins-bad-free-devel'] pkgs_xbps = ['libtool', 'gcc', 'libXi-devel', 'freetype-devel', 'libunwind-devel', 'MesaLib-devel', 'glib-devel', 'pkg-config', 'libX11-devel', 'libXrandr-devel', 'gperf', 'bzip2-devel', 'fontconfig-devel', 'cabextract', 'expat-devel', 'cmake', 
'cmake', 'libXcursor-devel', 'libXmu-devel', 'dbus-devel', 'ncurses-devel', 'harfbuzz-devel', 'ccache', 'glu-devel', 'clang', 'gstreamer1-devel', 'autoconf213', 'gst-plugins-base1-devel', 'gst-plugins-bad1-devel'] installed_something = install_linux_deps(context, pkgs_apt, pkgs_dnf, pkgs_xbps, force) if not check_gstreamer_lib(): installed_something |= gstreamer(context, force) if not installed_something: print("Dependencies were already installed!") return 0 def salt(context, force=False): # Ensure Salt dependencies are installed install_salt_dependencies(context, force) # Ensure Salt is installed in the virtualenv # It's not installed globally because it's a large, non-required dependency, # and the installation fails on Windows print("Checking Salt installation...", end='') reqs_path = os.path.join(context.topdir, 'python', 'requirements-salt.txt') process = subprocess.Popen( ["pip", "install", "-q", "-I", "-r", reqs_path], stdout=PIPE, stderr=PIPE ) process.wait() if process.returncode: out, err = process.communicate() print('failed to install Salt via pip:') print('Output: {}\nError: {}'.format(out, err)) return 1 print("done") salt_root = os.path.join(context.sharedir, 'salt') config_dir = os.path.join(salt_root, 'etc', 'salt') pillar_dir = os.path.join(config_dir, 'pillars') # In order to allow `mach bootstrap` to work from any CWD, # the `root_dir` must be an absolute path. # We place it under `context.sharedir` because # Salt caches data (e.g. gitfs files) in its `var` subdirectory. # Hence, dynamically generate the config with an appropriate `root_dir` # and serialize it as JSON (which is valid YAML). 
config = { 'hash_type': 'sha384', 'master': 'localhost', 'root_dir': salt_root, 'state_output': 'changes', 'state_tabular': True, } if 'SERVO_SALTFS_ROOT' in os.environ: config.update({ 'fileserver_backend': ['roots'], 'file_roots': { 'base': [os.path.abspath(os.environ['SERVO_SALTFS_ROOT'])], }, }) else: config.update({ 'fileserver_backend': ['git'], 'gitfs_env_whitelist': 'base', 'gitfs_provider': 'gitpython', 'gitfs_remotes': [ 'https://github.com/servo/saltfs.git', ], }) if not os.path.exists(config_dir): os.makedirs(config_dir, mode=0o700) with open(os.path.join(config_dir, 'minion'), 'w') as config_file: config_file.write(json.dumps(config) + '\n') # Similarly, the pillar data is created dynamically # and temporarily serialized to disk. # This dynamism is not yet used, but will be in the future # to enable Android bootstrapping by using # context.sharedir as a location for Android packages. pillar = { 'top.sls': { 'base': { '*': ['bootstrap'], }, }, 'bootstrap.sls': { 'fully_managed': False, }, } if os.path.exists(pillar_dir): shutil.rmtree(pillar_dir) os.makedirs(pillar_dir, mode=0o700) for filename in pillar: with open(os.path.join(pillar_dir, filename), 'w') as pillar_file: pillar_file.write(json.dumps(pillar[filename]) + '\n') cmd = [ # sudo escapes from the venv, need to use full path find_executable('salt-call'), '--local', '--config-dir={}'.format(config_dir), '--pillar-root={}'.format(pillar_dir), 'state.apply', 'servo-build-dependencies', ] if not force: print('Running bootstrap in dry-run mode to show changes') # Because `test=True` mode runs each state individually without # considering how required/previous states affect the system, # it will often report states with requisites as failing due # to the requisites not actually being run, # even though these are spurious and will succeed during # the actual highstate. # Hence `--retcode-passthrough` is not helpful in dry-run mode, # so only detect failures of the actual salt-call binary itself. 
retcode = run_as_root(cmd + ['test=True']) if retcode != 0: print('Something went wrong while bootstrapping') return retcode proceed = input( 'Proposed changes are above, proceed with bootstrap? [y/N]: ' ) if proceed.lower() not in ['y', 'yes']: return 0 print('') print('Running Salt bootstrap') retcode = run_as_root(cmd + ['--retcode-passthrough']) if retcode == 0: print('Salt bootstrapping complete') else: print('Salt bootstrapping encountered errors') return retcode def windows_msvc(context, force=False): '''Bootstrapper for MSVC building on Windows.''' deps_dir = os.path.join(context.sharedir, "msvc-dependencies") deps_url = "https://servo-deps-2.s3.amazonaws.com/msvc-deps/" def version(package): return packages.WINDOWS_MSVC[package] def package_dir(package): return os.path.join(deps_dir, package, version(package)) def check_cmake(version): cmake_path = find_executable("cmake") if cmake_path: cmake = subprocess.Popen([cmake_path, "--version"], stdout=PIPE) cmake_version_output = six.ensure_str(cmake.stdout.read()).splitlines()[0] cmake_version = cmake_version_output.replace("cmake version ", "") if LooseVersion(cmake_version) >= LooseVersion(version): return True return False def prepare_file(zip_path, full_spec): if not os.path.isfile(zip_path): zip_url = "{}{}.zip".format(deps_url, urllib.parse.quote(full_spec)) download_file(full_spec, zip_url, zip_path) print("Extracting {}...".format(full_spec), end='') try: extract(zip_path, deps_dir) except BadZipfile: print("\nError: %s.zip is not a valid zip file, redownload..." 
% full_spec) os.remove(zip_path) prepare_file(zip_path, full_spec) else: print("done") to_install = {} for package in packages.WINDOWS_MSVC: # Don't install CMake if it already exists in PATH if package == "cmake" and check_cmake(version("cmake")): continue if not os.path.isdir(package_dir(package)): to_install[package] = version(package) if not to_install: return 0 print("Installing missing MSVC dependencies...") for package in to_install: full_spec = '{}-{}'.format(package, version(package)) parent_dir = os.path.dirname(package_dir(package)) if not os.path.isdir(parent_dir): os.makedirs(parent_dir) zip_path = package_dir(package) + ".zip" prepare_file(zip_path, full_spec) extracted_path = os.path.join(deps_dir, full_spec) os.rename(extracted_path, package_dir(package)) return 0 LINUX_SPECIFIC_BOOTSTRAPPERS = { "salt": salt, "gstreamer": bootstrap_gstreamer, } def get_linux_distribution(): distrib, version, _ = distro.linux_distribution() distrib = six.ensure_str(distrib) version = six.ensure_str(version) if distrib in ['LinuxMint', 'Linux Mint', 'KDE neon']: if '.' in version: major, _ = version.split('.', 1) else: major = version if major == '20': base_version = '20.04' elif major == '19': base_version = '18.04' elif major == '18': base_version = '16.04' else: raise Exception('unsupported version of %s: %s' % (distrib, version)) distrib, version = 'Ubuntu', base_version elif distrib == 'Pop!_OS': if '.' 
in version: major, _ = version.split('.', 1) else: major = version if major == '20': base_version = '20.04' elif major == '19': base_version = '18.04' elif major == '18': base_version = '16.04' else: raise Exception('unsupported version of %s: %s' % (distrib, version)) distrib, version = 'Ubuntu', base_version elif distrib.lower() == 'elementary': if version == '5.0': base_version = '18.04' elif version[0:3] == '0.4': base_version = '16.04' else: raise Exception('unsupported version of %s: %s' % (distrib, version)) distrib, version = 'Ubuntu', base_version elif distrib.lower() == 'ubuntu': if version > '21.04': raise Exception('unsupported version of %s: %s' % (distrib, version)) # Fixme: we should allow checked/supported versions only elif distrib.lower() not in [ 'centos', 'centos linux', 'debian gnu/linux', 'fedora', 'void', 'nixos', ]: raise Exception('mach bootstrap does not support %s, please file a bug' % distrib) return distrib, version def bootstrap(context, force=False, specific=None): '''Dispatches to the right bootstrapping function for the OS.''' bootstrapper = None if "windows-msvc" in host_triple(): bootstrapper = windows_msvc elif "linux-gnu" in host_triple(): distrib, version = get_linux_distribution() if distrib.lower() == 'nixos': print('NixOS does not need bootstrap, it will automatically enter a nix-shell') print('Just run ./mach build') print('') print('You will need to run a nix-shell if you are trying to run any of the built binaries') print('To enter the nix-shell manually use:') print(' $ nix-shell etc/shell.nix') return context.distro = distrib context.distro_version = version bootstrapper = LINUX_SPECIFIC_BOOTSTRAPPERS.get(specific, linux) if bootstrapper is None: print('Bootstrap support is not yet available for your OS.') return 1 return bootstrapper(context, force=force)
python/servo/bootstrap.py
15,201
Dispatches to the right bootstrapping function for the OS. Bootstrapper for MSVC building on Windows. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. Please keep these in sync with the packages in README.md Ensure Salt dependencies are installed Ensure Salt is installed in the virtualenv It's not installed globally because it's a large, non-required dependency, and the installation fails on Windows In order to allow `mach bootstrap` to work from any CWD, the `root_dir` must be an absolute path. We place it under `context.sharedir` because Salt caches data (e.g. gitfs files) in its `var` subdirectory. Hence, dynamically generate the config with an appropriate `root_dir` and serialize it as JSON (which is valid YAML). Similarly, the pillar data is created dynamically and temporarily serialized to disk. This dynamism is not yet used, but will be in the future to enable Android bootstrapping by using context.sharedir as a location for Android packages. sudo escapes from the venv, need to use full path Because `test=True` mode runs each state individually without considering how required/previous states affect the system, it will often report states with requisites as failing due to the requisites not actually being run, even though these are spurious and will succeed during the actual highstate. Hence `--retcode-passthrough` is not helpful in dry-run mode, so only detect failures of the actual salt-call binary itself. Don't install CMake if it already exists in PATH Fixme: we should allow checked/supported versions only
1,689
en
0.888844
#!/usr/bin/env python from setuptools import setup, find_packages from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name='pystadel', version='1.0.0', description='Class for sending SMSes using Stadel SMS gateway', long_description=long_description, url='https://github.com/luttermann/pystadel', author='Lasse Luttermann Poulsen', author_email='lasse@poulsen.dk', license='BSD-2-Clause', # https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # 4 - Beta # 5 - Production/Stable 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', # It might work in other versions, but these are not testet. 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], keywords='sms stadel', py_modules=["stadel"], )
setup.py
1,218
!/usr/bin/env python Get the long description from the README file https://pypi.python.org/pypi?%3Aaction=list_classifiers 4 - Beta 5 - Production/Stable It might work in other versions, but these are not testet.
216
en
0.678904
""" Copyright (c) 2016-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. """ import unittest import s1ap_types from integ_tests.s1aptests import s1ap_wrapper class TestSctpAbortAfterSmc(unittest.TestCase): def setUp(self): self._s1ap_wrapper = s1ap_wrapper.TestWrapper() def tearDown(self): self._s1ap_wrapper.cleanup() def test_sctp_abort_after_smc(self): """ testing Sctp Abort after Security Mode Command for a single UE """ self._s1ap_wrapper.configUEDevice(1) req = self._s1ap_wrapper.ue_req print( "************************* Running Sctp Abort after Security" " Mode Command for a single UE for UE id ", req.ue_id, ) attach_req = s1ap_types.ueAttachRequest_t() attach_req.ue_Id = req.ue_id sec_ctxt = s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT id_type = s1ap_types.TFW_MID_TYPE_IMSI eps_type = s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH attach_req.mIdType = id_type attach_req.epsAttachType = eps_type attach_req.useOldSecCtxt = sec_ctxt print("Sending Attach Request ue-id", req.ue_id) self._s1ap_wrapper._s1_util.issue_cmd( s1ap_types.tfwCmd.UE_ATTACH_REQUEST, attach_req ) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual( response.msg_type, s1ap_types.tfwCmd.UE_AUTH_REQ_IND.value ) print("Received auth req ind ue-id", req.ue_id) auth_res = s1ap_types.ueAuthResp_t() auth_res.ue_Id = req.ue_id sqn_recvd = s1ap_types.ueSqnRcvd_t() sqn_recvd.pres = 0 auth_res.sqnRcvd = sqn_recvd print("Sending Auth Response ue-id", req.ue_id) self._s1ap_wrapper._s1_util.issue_cmd( s1ap_types.tfwCmd.UE_AUTH_RESP, auth_res ) response = self._s1ap_wrapper.s1_util.get_response() self.assertEqual( response.msg_type, s1ap_types.tfwCmd.UE_SEC_MOD_CMD_IND.value ) print("Received Security Mode Command ue-id", req.ue_id) print("send SCTP ABORT") sctp_abort = 
s1ap_types.FwSctpAbortReq_t() sctp_abort.cause = 3 self._s1ap_wrapper._s1_util.issue_cmd( s1ap_types.tfwCmd.SCTP_ABORT_REQ, sctp_abort ) if __name__ == "__main__": unittest.main()
lte/gateway/python/integ_tests/s1aptests/test_sctp_abort_after_smc.py
2,604
testing Sctp Abort after Security Mode Command for a single UE Copyright (c) 2016-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory.
347
en
0.91211
''' Borrowed from Asteroid.py and Ship.py which was created by Lukas Peraza url: https://github.com/LBPeraza/Pygame-Asteroids Subzero sprite borrowed from: https://www.spriters-resource.com/playstation/mkmsz/sheet/37161/ ''' import pygame import os from CollegiateObjectFile import CollegiateObject # right in variable means facing right, left means facing left class Character(CollegiateObject): @staticmethod def init(character): # Create a list of every image of a character images = [] path = "images/%s/ordered images" %character # Upload each image in order, and resize accordingly maxDim = 70 for imageName in os.listdir(path): maxDim = 70 image = pygame.image.load(path + os.sep + imageName) if "effect" in imageName: # Resize special move effects images with static attribute maxDim if character == "goku" or character == "raizen" or character == "naruto" or character == "sasuke": maxDim = 120 else: maxDim = 70 w, h = image.get_size() factor = 1 if w != maxDim: factor = maxDim / w if h != maxDim: factor = maxDim / h image = pygame.transform.scale( image, ( int(w * factor), int(h * factor) ) ) elif "jump" in imageName: # Resize special move effects images with static attribute maxDim w, h = image.get_size() factor = 1 if w != Character.maxWidth: factor = Character.maxWidth / w image = pygame.transform.scale( image, ( int(w * factor), int(h * factor) ) ) else: # Resize character images with static attribute maxWidth and maxHeight w, h = image.get_size() factor = 1 if w != Character.maxWidth: factor = Character.maxWidth / w if h != Character.maxHeight: factor = Character.maxHeight / h image = pygame.transform.scale( image, ( int(w * factor), int(h * factor) ) ) images.append(image) Character.charactersDict[character] = Character.charactersDict.get(character, images) # Create a dictionary of the images of a character mapped to the character charactersDict = {} maxWidth = 100 maxHeight = 170 maxDim = 70 gravity = .75 runVel = 10 maxHealth = 300 maxEnergy = 100 red = (255, 
0, 0) green = (0, 255, 0) blue = (0, 0, 255) orange = (255, 128, 0) def __init__(self, character, screenWidth, screenHeight, isRight, player): self.character = character self.player = player self.isRight = isRight if self.character == "subzero": self.specialName = "freeze" elif self.character == "scorpion": self.specialName = "spear" elif self.character == "raizen": self.specialName = "spirit shotgun" elif self.character == "goku": self.specialName = "kamehameha" elif self.character == "naruto": self.specialName = "rasengan" elif self.character == "sasuke": self.specialName = "chidori" Character.maxHeight = screenHeight Character.maxWidth = screenWidth # Initiate health and energy bars margin = 5 barMargin = margin + 45 self.barHeight = 10 self.healthY = 10 self.healthWidth = 300 self.health = Character.maxHealth self.healthColor = Character.green labeledge = 20 if self.player == 1: self.healthX = barMargin elif self.player == 2: self.healthX = screenWidth - barMargin - self.healthWidth - labeledge self.energyY = 30 self.energy = Character.maxEnergy self.energyColor = Character.red if self.player == 1: self.energyX = barMargin elif self.player == 2: self.energyX = screenWidth - barMargin - self.healthWidth - labeledge self.images = Character.charactersDict[character] # All imported images are uploaded in the following order: icon, idle, jump, block, run, punch, kick, special, effect characterInstances = ["icon", "idle", "jump", "block", "damage1", "run", "punch", "kick", "special1", "effect1"]# added "damage", after block # Create a dictionary mapping the character instance to it's respective image self.spriteRightDict = {} i = 0 for instance in characterInstances: self.spriteRightDict[instance] = self.spriteRightDict.get(instance, self.images[i]) i += 1 # Flip all pictures to face left for left disctionary self.spriteLeftDict = {} j = 0 for sprite in characterInstances: # Don't want to flip run image yet if sprite == "run": self.spriteLeftDict[sprite] = 
self.images[j] image = pygame.transform.flip(self.images[j], True, False) self.spriteLeftDict[sprite] = self.spriteLeftDict.get(sprite, image) j += 1 # Pass information to parent CollegiateObject class to initialize character self.spriteDict = {} # Get the starting image, and x location if self.isRight: self.spriteDict = self.spriteRightDict idleImage = self.spriteRightDict["idle"] w, h = idleImage.get_size() x = margin + (w // 2) elif not self.isRight: self.spriteDict = self.spriteLeftDict idleImage = self.spriteLeftDict["idle"] w, h = idleImage.get_size() x = screenWidth - margin - (w // 2) r = max(w,h) // 2 y = screenHeight - margin - (h // 2) super(Character, self).__init__(x, y, idleImage, r) # Get dictionary of sounds (actually set in run game, but initiated here) self.sounds = {} # Set other attributes self.isDead = False self.isFlipped = False self.isIdle = True self.idleCount = 0 self.isAttack = False self.isDamage = False # Keep damage image for 1 second self.damageCount = 1 self.isRunLeft = False self.isRunRight = False self.isJump = False self.jumpVel = 10 self.peakJump = screenWidth // 4 self.idleY = self.y self.isBlock = False self.isPunch = False # Keep punch image for 1 second self.punchCount = 1 self.punchDamage = 20 self.isKick = False self.kickCount = 20 self.kickDamage = 25 self.isSpecial = False self.specialCount = 30 self.specialDamage = 50 #print("Loaded Character") def loseHealth(self, damage): if self.isBlock: self.sounds["block"].play() if not self.isBlock: self.sounds["damage1"].play() if self.isDamage and self.health > 0: self.health -= damage if self.health <= 0: if self.healthColor == Character.green: self.health = Character.maxHealth self.healthColor = Character.orange elif self.healthColor == Character.orange: self.health = Character.maxHealth self.healthColor = Character.red else: self.health = 0 self.isDead = True if not self.isBlock: self.baseImage = self.spriteDict["damage1"] def getEnergy(self): increment = 10 maxEnergy = 100 if 
self.energy <= (maxEnergy - increment) and self.isAttack: self.energy += increment if self.energy >= Character.maxEnergy: self.energy = Character.maxEnergy def update(self, dt, keysDown, screenWidth, screenHeight): # Change facing direction when characters switch sides if self.isRight: self.spriteDict = self.spriteRightDict elif not self.isRight: self.spriteDict = self.spriteLeftDict player1Moves = {"Left": keysDown(pygame.K_a), "Right": keysDown(pygame.K_d), "Down": keysDown(pygame.K_s), "Up": keysDown(pygame.K_w), "Punch": keysDown(pygame.K_v), "Kick": keysDown(pygame.K_c), "Special1": keysDown(pygame.K_SPACE) } player2Moves = {"Left": keysDown(pygame.K_LEFT), "Right": keysDown(pygame.K_RIGHT), "Down": keysDown(pygame.K_DOWN), "Up": keysDown(pygame.K_UP), "Punch": keysDown(pygame.K_l), "Kick": keysDown(pygame.K_k), "Special1": keysDown(pygame.K_j) } if self.player == 1: self.moves = player1Moves elif self.player == 2: self.moves = player2Moves self.idleCount += 1 margin = 5 boarderLeft = 0 + margin + (self.width // 2) boarderRight = screenWidth - margin - (self.width // 2) boarderBottom = screenHeight - margin - (self.height // 2) if self.moves["Left"] and self.x > boarderLeft and not self.isJump and not self.isBlock and not self.isDamage: self.x -= Character.runVel self.baseImage = pygame.transform.flip(self.spriteDict["run"], True, False) self.isRunLeft = True self.isIdle = False if self.isRunLeft and not self.isJump and not self.moves["Left"]: self.isRunLeft = False self.isIdle = True self.baseImage = self.spriteDict["idle"] if self.moves["Right"] and self.x < boarderRight and not self.isJump and not self.isBlock and not self.isDamage: # not elif! 
if we're holding left and right, don't turn self.x += Character.runVel self.baseImage = self.spriteDict["run"] self.isRunRight = True self.isIdle = False if self.isRunRight and not self.isJump and not self.moves["Right"]: self.isRunRight = False self.isIdle = True self.baseImage = self.spriteDict["idle"] if self.moves["Down"] and not self.isJump and not self.isDamage: self.baseImage = self.spriteDict["block"] self.isBlock = True self.isIdle = False if self.isBlock and not self.moves["Down"]: self.isBlock = False self.isIdle = True self.baseImage = self.spriteDict["idle"] if self.moves["Up"] and self.y >= boarderBottom and not self.isJump and not self.isBlock and not self.isDamage:# and self.isIdle: self.sounds["jump"].play() self.baseImage = self.spriteDict["jump"] self.isJump = True self.isIdle = False elif self.isJump: if self.jumpVel >= 0: self.y -= (self.jumpVel** 2) // 2 if self.isRunLeft and (self.x - Character.runVel) >= boarderLeft: self.x -= Character.runVel elif self.isRunRight and (self.x + Character.runVel) <= boarderRight: self.x += Character.runVel self.jumpVel -= Character.gravity else: self.y += (self.jumpVel** 2) // 2 if self.isRunLeft and (self.x - Character.runVel) >= boarderLeft: self.x -= Character.runVel elif self.isRunRight and (self.x + Character.runVel) <= boarderRight: self.x += Character.runVel self.jumpVel -= Character.gravity if self.y > self.idleY: self.baseImage = self.spriteDict["idle"] self.y = self.idleY self.isJump = False self.isRunLeft = False self.isRunRight = False self.isIdle = True self.jumpVel = 10 if self.moves["Punch"] and self.isIdle and self.idleCount >= 20 and not self.isPunch and not self.isDamage: self.sounds["punch"].play() self.baseImage = self.spriteDict["punch"] self.isPunch = True self.isIdle = False elif self.isPunch: if self.punchCount >= 0: self.punchCount -= 1 else: self.isPunch = False self.isIdle = True self.idleCount = 0 self.punchCount = 20 self.baseImage = self.spriteDict["idle"] if self.moves["Kick"] 
and self.isIdle and self.idleCount >= 20 and not self.isDamage: self.sounds["kick"].play() self.baseImage = self.spriteDict["kick"] self.isKick = True self.isIdle = False elif self.isKick: if self.kickCount >= 0: self.kickCount -= 1 else: self.isKick = False self.isIdle = True self.idleCount = 0 self.kickCount = 20 self.baseImage = self.spriteDict["idle"] if self.moves["Special1"] and self.isIdle and self.idleCount >= 20 and (self.energy >= self.specialDamage) and not self.isJump and not self.isBlock and not self.isDamage: self.sounds["special1"].play() self.baseImage = self.spriteDict["special1"] self.isSpecial = True self.isIdle = False self.energy -= self.specialDamage elif self.isSpecial: if self.specialCount >= 0: self.specialCount -= 1 else: self.isSpecial = False self.isIdle = True self.idleCount = 0 self.specialCount = 30 self.baseImage = self.spriteDict["idle"] super(Character, self).update(screenWidth, screenHeight)
CharacterFile.py
14,894
Borrowed from Asteroid.py and Ship.py which was created by Lukas Peraza url: https://github.com/LBPeraza/Pygame-Asteroids Subzero sprite borrowed from: https://www.spriters-resource.com/playstation/mkmsz/sheet/37161/ right in variable means facing right, left means facing left Create a list of every image of a character Upload each image in order, and resize accordingly Resize special move effects images with static attribute maxDim Resize special move effects images with static attribute maxDim Resize character images with static attribute maxWidth and maxHeight Create a dictionary of the images of a character mapped to the character Initiate health and energy bars All imported images are uploaded in the following order: icon, idle, jump, block, run, punch, kick, special, effect added "damage", after block Create a dictionary mapping the character instance to it's respective image Flip all pictures to face left for left disctionary Don't want to flip run image yet Pass information to parent CollegiateObject class to initialize character Get the starting image, and x location Get dictionary of sounds (actually set in run game, but initiated here) Set other attributes Keep damage image for 1 second Keep punch image for 1 secondprint("Loaded Character") Change facing direction when characters switch sides not elif! if we're holding left and right, don't turn and self.isIdle:
1,403
en
0.88369
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .advanced_filter import AdvancedFilter class NumberNotInAdvancedFilter(AdvancedFilter): """NumberNotIn Filter. All required parameters must be populated in order to send to Azure. :param key: The filter key. Represents an event property with upto two levels of nesting. :type key: str :param operator_type: Required. Constant filled by server. :type operator_type: str :param values: The set of filter values :type values: list[float] """ _validation = { 'operator_type': {'required': True}, } _attribute_map = { 'key': {'key': 'key', 'type': 'str'}, 'operator_type': {'key': 'operatorType', 'type': 'str'}, 'values': {'key': 'values', 'type': '[float]'}, } def __init__(self, **kwargs): super(NumberNotInAdvancedFilter, self).__init__(**kwargs) self.values = kwargs.get('values', None) self.operator_type = 'NumberNotIn'
azure-mgmt-eventgrid/azure/mgmt/eventgrid/models/number_not_in_advanced_filter.py
1,425
NumberNotIn Filter. All required parameters must be populated in order to send to Azure. :param key: The filter key. Represents an event property with upto two levels of nesting. :type key: str :param operator_type: Required. Constant filled by server. :type operator_type: str :param values: The set of filter values :type values: list[float] coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. --------------------------------------------------------------------------
801
en
0.635212
import os import sys import socket import struct import SocketServer import threadpool # fake ip list FAKE_IPLIST = {} # dns server config TIMEOUT = 2 # set timeout 2 second TRY_TIMES = 5 # try to recv msg times DNS_SERVER = '8.8.8.8' # remote dns server # currently not used def bytetodomain(s): domain = '' i = 0 length = struct.unpack('!B', s[0:1])[0] while length != 0: i += 1 domain += s[i:i + length] i += length length = struct.unpack('!B', s[i:i+1])[0] if length != 0: domain += '.' return (domain, i + 1) def skip_query(query): step = 0 length = struct.unpack('!B', query[0:1])[0] while length != 0: step = step + length + 1 length = struct.unpack('!B', query[step:step+1])[0] return step + 1 def is_valid_pkt(response): try: (flag, qdcount, ancount) = struct.unpack('!HHH', response[2:8]) if flag != 0x8180 and flag != 0x8580: return True if 1 != qdcount or 1 != ancount: return True dlen = skip_query(response[12:]) pos = 12 + dlen (qtype, qclass) = struct.unpack('!HH', response[pos:pos+4]) # qtype is 1 (mean query HOST ADDRESS), qclass is 1 (mean INTERNET) if 1 != qtype or 1 != qclass: return True pos = pos + 4 # position for response if ord(response[pos:pos+1]) & 0xc0: pos = pos + 12 else: pos = pos + dlen + 10 if response[pos:pos+4] in FAKE_IPLIST: print('Match: ' + socket.inet_ntoa(response[pos:pos+4])) return False except Exception, e: print(e) return True class ThreadPoolMixIn: def process_request_thread(self, request, client_address): try: self.finish_request(request, client_address) self.shutdown_request(request) except: self.handle_error(request, client_address) self.shutdown_request(request) def process_request(self, request, client_address): self.tp.add_task(self.process_request_thread, request, client_address) def serve_forever(self, poll_interval=0.5): try: SocketServer.UDPServer.serve_forever(self, poll_interval) finally: self.tp.stop() class DNSFilter(ThreadPoolMixIn, SocketServer.UDPServer): # much faster rebinding allow_reuse_address = True def 
__init__(self, s, t): self.tp = threadpool.ThreadPool(20) SocketServer.UDPServer.__init__(self, s, t) class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler): def handle(self): query_data = self.request[0] udp_sock = self.request[1] addr = self.client_address response = self.dns_query(DNS_SERVER, 53, query_data) if response: # udp dns packet no length udp_sock.sendto(response, addr) def dns_query(self, dns_ip, dns_port, query_data): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.settimeout(TIMEOUT) # set socket timeout = 5s s.sendto(query_data, (dns_ip, dns_port)) for i in xrange(TRY_TIMES): data, addr = s.recvfrom(1024) if is_valid_pkt(data): return data else: data = None except: return None finally: if s: s.close() return data if __name__ == '__main__': print '---------------------------------------------------------------' print '| To Use this tool, you must set your dns server to 127.0.0.1 |' print '---------------------------------------------------------------' # load config file, iplist.txt from https://github.com/clowwindy/ChinaDNS with open('iplist.txt', 'rb') as f: while 1: ip = f.readline() if ip: FAKE_IPLIST[socket.inet_aton(ip[:-1])] = None else: break dns_server = DNSFilter(('0.0.0.0', 53), ThreadedUDPRequestHandler) try: dns_server.serve_forever() except: pass finally: pass
DNSFilter.py
4,364
fake ip list dns server config set timeout 2 second try to recv msg times remote dns server currently not used qtype is 1 (mean query HOST ADDRESS), qclass is 1 (mean INTERNET) position for response much faster rebinding udp dns packet no length set socket timeout = 5s load config file, iplist.txt from https://github.com/clowwindy/ChinaDNS
341
en
0.82295
"""Djinni manager tool""" import os import ezored.functions as fn import ezored.logging as log from ezored import constants as const # ----------------------------------------------------------------------------- def run(params={}): args = params['args'] if len(args) > 0: action = args[0] if action: if action == 'generate': generate(params) else: help(params) else: help(params) else: help(params) # ----------------------------------------------------------------------------- def generate(params={}): dirs = fn.find_dirs_simple(os.path.join( fn.root_dir(), const.DIR_NAME_FILES, const.DIR_NAME_DJINNI), '*' ) if dirs: log.info('Generating files for all modules...') dirs.sort() for item in dirs: if fn.file_exists(os.path.join(item, 'generate.py')): dir_name = os.path.basename(item) log.info('Generating djinni files for "{0}"...'.format(dir_name)) fn.run_simple(['python', 'generate.py'], item) log.ok() else: log.error('No djinni modules to generate') # ----------------------------------------------------------------------------- def help(params={}): log.colored('Available actions:\n', log.PURPLE) log.normal(' - generate') # ----------------------------------------------------------------------------- def get_description(params={}): return 'Djinni manager tool'
files/commands/djinni/djinni.py
1,563
Djinni manager tool ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- -----------------------------------------------------------------------------
333
en
0.132544
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html import pymongo import datetime from scrapy.conf import settings # 学历列表 educations = ("不限","大专","本科","硕士","博士") #修正学历 有些职位中的学历明显不一致。需要修正 def clean_education(edu,body): if edu not in educations: for i in educations: if i in body: edu = i else: edu = '不限' return edu def clear_salary(salary): res = salary.split("-") temp = [] for x in res: temp.append(int(x.upper().replace("K"," "))*1000) result = { "min":temp[0], "max":temp[1], "avg":int((temp[0]+temp[1])/2) } return result def clear_time(time): now_year = datetime.datetime.now().year if '发布于' in time: time = time.replace("发布于", str(now_year)+"-") time = time.replace("月", "-") time = time.replace("日", "") if time.find("昨天") > 0: time = str(datetime.date.today() - datetime.timedelta(days=1)) elif time.find(":") > 0: time = str(datetime.date.today()) return time def clear_position(name): data = name.split(" ") name = data[0] work_year = data[-2] educational = data[-1] return name,work_year,educational #判断PHP是否在职位名称中,不在就过滤掉。 #jd中含有php不参考,因为很多jd中都乱写 def clean_name(name): if "PHP" not in name.upper(): return False return True class TutorialPipeline(object): def process_item(self, item, spider): client = pymongo.MongoClient(host="127.0.0.1", port=27017) db = client['job'] collection = db['position2'] collection.insert(dict(item)) client.close() return item #处理直聘网数据 class ZhipinPipeline(object): def process_item(self, item, spider): client = pymongo.MongoClient(host="127.0.0.1", port=27017) db = client['job'] collection = db['position'] item['salary'] = clear_salary(item['salary']) item['create_time'] = clear_time(item['create_time']) item['educational'] = clean_education(item['educational'],item['body']) is_php = clean_name(item['position_name']) if is_php is True: collection.insert(dict(item)) client.close() return item #处理51job数据 
class FiveJobPipeline(object): def clear_salary(self,salary): lists = salary.split("/")[0].split('-') min,max = lists unit = 10000 if "千" in max: unit = 1000 max = max.replace("千","") else: max = max.replace("万","") print(max) result = {} result['min'] = float(min)*unit result['max'] = float(max)*unit result['avg'] = (result['max']+result['min'])/2 return result def clear_address(self,address): if "上班地址" in address: address = address.replace("上班地址 :"," ") return address def clear_workyear(self,work_year): if "经验" in work_year: work_year = work_year.replace("工作经验"," ") or work_year.replace("经验"," ") return work_year def process_item(self, item, spider): client = pymongo.MongoClient(host="127.0.0.1", port=27017) db = client['job'] collection = db['51job'] item['salary'] = self.clear_salary(salary=item['salary']) item['address'] = self.clear_address(address=item['address']) item['work_year'] = self.clear_workyear(work_year=item['work_year']) collection.insert(dict(item)) client.close() return item
spider/python/tutorial/pipelines.py
3,902
-*- coding: utf-8 -*- Define your item pipelines here Don't forget to add your pipeline to the ITEM_PIPELINES setting See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html 学历列表修正学历 有些职位中的学历明显不一致。需要修正判断PHP是否在职位名称中,不在就过滤掉。jd中含有php不参考,因为很多jd中都乱写处理直聘网数据处理51job数据
267
zh
0.389656
# BSD 2-Clause License # Copyright (c) 2018, Stan Sakl # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. if __name__ == '__main__': print("Running kwic.py") inputFile = open("input.txt", "r", 1) outputFile = open("output.txt", "w", 1) lines = inputFile.readlines() for line in lines: substring = line.split() substring.sort() sortedString = [] for string in substring: #print(string) sortedString.append(string) outputFile.write(string) outputFile.write(" ") outputFile.write("\n") print(sortedString) inputFile.close() outputFile.close()
kwic_python/kwic.py
1,918
BSD 2-Clause License Copyright (c) 2018, Stan Sakl All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.print(string)
1,321
en
0.879645
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.async_support.base.exchange import Exchange import math from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import BadRequest from ccxt.base.errors import BadResponse from ccxt.base.errors import DDoSProtection from ccxt.base.precise import Precise class bytetrade(Exchange): def describe(self): return self.deep_extend(super(bytetrade, self).describe(), { 'id': 'bytetrade', 'name': 'ByteTrade', 'countries': ['HK'], 'rateLimit': 500, 'requiresWeb3': True, 'certified': False, # new metainfo interface 'has': { 'cancelOrder': True, 'CORS': False, 'createOrder': True, 'fetchBalance': True, 'fetchBidsAsks': True, 'fetchClosedOrders': True, 'fetchCurrencies': True, 'fetchDepositAddress': True, 'fetchDeposits': True, 'fetchMarkets': True, 'fetchMyTrades': True, 'fetchOHLCV': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchOrders': True, 'fetchTicker': True, 'fetchTickers': True, 'fetchTrades': True, 'fetchWithdrawals': True, 'withdraw': False, }, 'timeframes': { '1m': '1m', '5m': '5m', '15m': '15m', '30m': '30m', '1h': '1h', '4h': '4h', '1d': '1d', '5d': '5d', '1w': '1w', '1M': '1M', }, 'urls': { 'test': { 'market': 'https://api-v2-test.byte-trade.com', 'public': 'https://api-v2-test.byte-trade.com', }, 'logo': 'https://user-images.githubusercontent.com/1294454/67288762-2f04a600-f4e6-11e9-9fd6-c60641919491.jpg', 'api': { 'market': 'https://api-v2.byte-trade.com', 'public': 'https://api-v2.byte-trade.com', }, 'www': 'https://www.byte-trade.com', 'doc': 'https://docs.byte-trade.com/#description', }, 'api': { 'market': { 'get': [ 'klines', # Kline of a symbol 'depth', # Market Depth of a symbol 'trades', # Trade records of a symbol 'tickers', ], }, 
'public': { 'get': [ 'symbols', # Reference information of trading instrument, including base currency, quote precision, etc. 'currencies', # The list of currencies available 'balance', # Get the balance of an account 'orders/open', # Get the open orders of an account 'orders/closed', # Get the closed orders of an account 'orders/all', # Get the open and closed orders of an account 'orders', # Get the details of an order of an account 'orders/trades', # Get detail match results 'depositaddress', # Get deposit address 'withdrawals', # Get withdrawals info 'deposits', # Get deposit info 'transfers', # Get transfer info ], 'post': [ 'transaction/createorder', # Post create order transaction to blockchain 'transaction/cancelorder', # Post cancel order transaction to blockchain 'transaction/withdraw', # Post withdraw transaction to blockchain 'transaction/transfer', # Post transfer transaction to blockchain ], }, }, 'fees': { 'trading': { 'taker': 0.0008, 'maker': 0.0008, }, }, 'commonCurrencies': { '1': 'ByteTrade', '44': 'ByteHub', '48': 'Blocktonic', '133': 'TerraCredit', }, 'exceptions': { 'vertify error': AuthenticationError, # typo on the exchange side, 'vertify' 'verify error': AuthenticationError, # private key signature is incorrect 'transaction already in network': BadRequest, # same transaction submited 'invalid argument': BadRequest, }, 'options': { 'orderExpiration': 31536000000, # one year }, }) async def fetch_currencies(self, params={}): currencies = await self.publicGetCurrencies(params) result = {} for i in range(0, len(currencies)): currency = currencies[i] id = self.safe_string(currency, 'code') code = None if id in self.commonCurrencies: code = self.commonCurrencies[id] else: code = self.safe_string(currency, 'name') name = self.safe_string(currency, 'fullname') # in byte-trade.com DEX, request https://api-v2.byte-trade.com/currencies will return currencies, # the api doc is 
https://github.com/Bytetrade/bytetrade-official-api-docs/wiki/rest-api#get-currencies-get-currencys-supported-in-bytetradecom # we can see the coin name is none-unique in the result, the coin which code is 18 is the CyberMiles ERC20, and the coin which code is 35 is the CyberMiles main chain, but their name is same. # that is because bytetrade is a DEX, supports people create coin with the same name, but the id(code) of coin is unique, so we should use the id or name and id as the identity of coin. # For coin name and symbol is same with CCXT, I use name@id as the key of commonCurrencies dict. # [{ # "name": "CMT", # currency name, non-unique # "code": "18", # currency id, unique # "type": "crypto", # "fullname": "CyberMiles", # "active": True, # "chainType": "ethereum", # "basePrecision": 18, # "transferPrecision": 10, # "externalPrecision": 18, # "chainContractAddress": "0xf85feea2fdd81d51177f6b8f35f0e6734ce45f5f", # "limits": { # "deposit": { # "min": "0", # "max": "-1" # }, # "withdraw": { # "min": "0", # "max": "-1" # } # } # }, # { # "name": "CMT", # "code": "35", # "type": "crypto", # "fullname": "CyberMiles", # "active": True, # "chainType": "cmt", # "basePrecision": 18, # "transferPrecision": 10, # "externalPrecision": 18, # "chainContractAddress": "0x0000000000000000000000000000000000000000", # "limits": { # "deposit": { # "min": "1", # "max": "-1" # }, # "withdraw": { # "min": "10", # "max": "-1" # } # } # } # ] active = self.safe_value(currency, 'active') limits = self.safe_value(currency, 'limits') deposit = self.safe_value(limits, 'deposit') amountPrecision = self.safe_integer(currency, 'basePrecision') maxDeposit = self.safe_number(deposit, 'max') if maxDeposit == -1.0: maxDeposit = None withdraw = self.safe_value(limits, 'withdraw') maxWithdraw = self.safe_number(withdraw, 'max') if maxWithdraw == -1.0: maxWithdraw = None result[code] = { 'id': id, 'code': code, 'name': name, 'active': active, 'precision': amountPrecision, 'fee': None, 'limits': { 
'amount': {'min': None, 'max': None}, 'deposit': { 'min': self.safe_number(deposit, 'min'), 'max': maxDeposit, }, 'withdraw': { 'min': self.safe_number(withdraw, 'min'), 'max': maxWithdraw, }, }, 'info': currency, } return result async def fetch_markets(self, params={}): markets = await self.publicGetSymbols(params) result = [] for i in range(0, len(markets)): market = markets[i] id = self.safe_string(market, 'symbol') base = self.safe_string(market, 'baseName') quote = self.safe_string(market, 'quoteName') baseId = self.safe_string(market, 'base') quoteId = self.safe_string(market, 'quote') normalBase = base.split('@' + baseId)[0] normalQuote = quote.split('@' + quoteId)[0] if quoteId == '126': normalQuote = 'ZAR' # The id 126 coin is a special coin whose name on the chain is actually ZAR, but it is changed to ZCN after creation, so it must be changed to ZAR when placing the transaction in the chain normalSymbol = normalBase + '/' + normalQuote if baseId in self.commonCurrencies: base = self.commonCurrencies[baseId] if quoteId in self.commonCurrencies: quote = self.commonCurrencies[quoteId] symbol = base + '/' + quote limits = self.safe_value(market, 'limits', {}) amount = self.safe_value(limits, 'amount', {}) price = self.safe_value(limits, 'price', {}) precision = self.safe_value(market, 'precision', {}) active = self.safe_string(market, 'active') entry = { 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'baseId': baseId, 'quoteId': quoteId, 'info': market, 'active': active, 'precision': { 'amount': self.safe_integer(precision, 'amount'), 'price': self.safe_integer(precision, 'price'), }, 'normalSymbol': normalSymbol, 'limits': { 'amount': { 'min': self.safe_number(amount, 'min'), 'max': self.safe_number(amount, 'max'), }, 'price': { 'min': self.safe_number(price, 'min'), 'max': self.safe_number(price, 'max'), }, 'cost': { 'min': None, 'max': None, }, }, } result.append(entry) return result async def fetch_balance(self, params={}): if not ('userid' in 
params) and (self.apiKey is None): raise ArgumentsRequired(self.id + ' fetchDeposits() requires self.apiKey or userid argument') await self.load_markets() request = { 'userid': self.apiKey, } balances = await self.publicGetBalance(self.extend(request, params)) result = {'info': balances} for i in range(0, len(balances)): balance = balances[i] currencyId = self.safe_string(balance, 'code') code = self.safe_currency_code(currencyId, None) account = self.account() account['free'] = self.safe_string(balance, 'free') account['used'] = self.safe_string(balance, 'used') result[code] = account return self.parse_balance(result, False) async def fetch_order_book(self, symbol, limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], } if limit is not None: request['limit'] = limit # default = maximum = 100 response = await self.marketGetDepth(self.extend(request, params)) timestamp = self.safe_value(response, 'timestamp') orderbook = self.parse_order_book(response, symbol, timestamp) return orderbook def parse_ticker(self, ticker, market=None): timestamp = self.safe_integer(ticker, 'timestamp') # # [ # { # "symbol":"68719476706", # "name":"ETH/BTC", # "base":"2", # "quote":"32", # "timestamp":1575905991933, # "datetime":"2019-12-09T15:39:51.933Z", # "high":"0", # "low":"0", # "open":"0", # "close":"0", # "last":"0", # "change":"0", # "percentage":"0", # "baseVolume":"0", # "quoteVolume":"0" # } # ] # symbol = None marketId = self.safe_string(ticker, 'symbol') if marketId in self.markets_by_id: market = self.markets_by_id[marketId] else: baseId = self.safe_string(ticker, 'base') quoteId = self.safe_string(ticker, 'quote') if (baseId is not None) and (quoteId is not None): base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) symbol = base + '/' + quote if (symbol is None) and (market is not None): symbol = market['symbol'] return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': 
self.iso8601(timestamp), 'high': self.safe_number(ticker, 'high'), 'low': self.safe_number(ticker, 'low'), 'bid': None, 'bidVolume': None, 'ask': None, 'askVolume': None, 'vwap': self.safe_number(ticker, 'weightedAvgPrice'), 'open': self.safe_number(ticker, 'open'), 'close': self.safe_number(ticker, 'close'), 'last': self.safe_number(ticker, 'last'), 'previousClose': None, # previous day close 'change': self.safe_number(ticker, 'change'), 'percentage': self.safe_number(ticker, 'percentage'), 'average': None, 'baseVolume': self.safe_number(ticker, 'baseVolume'), 'quoteVolume': self.safe_number(ticker, 'quoteVolume'), 'info': ticker, } async def fetch_ticker(self, symbol, params={}): await self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], } response = await self.marketGetTickers(self.extend(request, params)) # # [ # { # "symbol":"68719476706", # "name":"ETH/BTC", # "base":"2", # "quote":"32", # "timestamp":1575905991933, # "datetime":"2019-12-09T15:39:51.933Z", # "high":"0", # "low":"0", # "open":"0", # "close":"0", # "last":"0", # "change":"0", # "percentage":"0", # "baseVolume":"0", # "quoteVolume":"0" # } # ] # if isinstance(response, list): ticker = self.safe_value(response, 0) if ticker is None: raise BadResponse(self.id + ' fetchTicker() returned an empty response') return self.parse_ticker(ticker, market) return self.parse_ticker(response, market) async def fetch_bids_asks(self, symbols=None, params={}): await self.load_markets() response = await self.marketGetDepth(params) return self.parse_tickers(response, symbols) async def fetch_tickers(self, symbols=None, params={}): await self.load_markets() response = await self.marketGetTickers(params) return self.parse_tickers(response, symbols) def parse_ohlcv(self, ohlcv, market=None): # # [ # 1591505760000, # "242.7", # "242.76", # "242.69", # "242.76", # "0.1892" # ] # return [ self.safe_integer(ohlcv, 0), self.safe_number(ohlcv, 1), self.safe_number(ohlcv, 2), 
self.safe_number(ohlcv, 3), self.safe_number(ohlcv, 4), self.safe_number(ohlcv, 5), ] async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], 'timeframe': self.timeframes[timeframe], } if since is not None: request['since'] = since if limit is not None: request['limit'] = limit response = await self.marketGetKlines(self.extend(request, params)) # # [ # [1591505760000,"242.7","242.76","242.69","242.76","0.1892"], # [1591505820000,"242.77","242.83","242.7","242.72","0.6378"], # [1591505880000,"242.72","242.73","242.61","242.72","0.4141"], # ] # return self.parse_ohlcvs(response, market, timeframe, since, limit) def parse_trade(self, trade, market=None): timestamp = self.safe_integer(trade, 'timestamp') price = self.safe_number(trade, 'price') amount = self.safe_number(trade, 'amount') cost = self.safe_number(trade, 'cost') id = self.safe_string(trade, 'id') type = self.safe_string(trade, 'type') takerOrMaker = self.safe_string(trade, 'takerOrMaker') side = self.safe_string(trade, 'side') datetime = self.iso8601(timestamp) # self.safe_string(trade, 'datetime') order = self.safe_string(trade, 'order') symbol = None if market is None: marketId = self.safe_string(trade, 'symbol') market = self.safe_value(self.markets_by_id, marketId) if market is not None: symbol = market['symbol'] feeData = self.safe_value(trade, 'fee') feeCost = self.safe_number(feeData, 'cost') feeRate = self.safe_number(feeData, 'rate') feeCode = self.safe_string(feeData, 'code') feeCurrency = self.safe_currency_code(feeCode) fee = { 'currency': feeCurrency, 'cost': feeCost, 'rate': feeRate, } return { 'info': trade, 'timestamp': timestamp, 'datetime': datetime, 'symbol': symbol, 'id': id, 'order': order, 'type': type, 'takerOrMaker': takerOrMaker, 'side': side, 'price': price, 'amount': amount, 'cost': cost, 'fee': fee, } async def fetch_trades(self, symbol, since=None, 
limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], } if since is not None: request['since'] = since if limit is not None: request['limit'] = limit # default = 100, maximum = 500 response = await self.marketGetTrades(self.extend(request, params)) return self.parse_trades(response, market, since, limit) def parse_order(self, order, market=None): status = self.safe_string(order, 'status') symbol = None marketId = self.safe_string(order, 'symbol') if marketId in self.markets_by_id: market = self.markets_by_id[marketId] else: baseId = self.safe_string(order, 'base') quoteId = self.safe_string(order, 'quote') if (baseId is not None) and (quoteId is not None): base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) symbol = base + '/' + quote if (symbol is None) and (market is not None): symbol = market['symbol'] timestamp = self.safe_integer(order, 'timestamp') datetime = self.safe_string(order, 'datetime') lastTradeTimestamp = self.safe_integer(order, 'lastTradeTimestamp') price = self.safe_number(order, 'price') amount = self.safe_number(order, 'amount') filled = self.safe_number(order, 'filled') remaining = self.safe_number(order, 'remaining') cost = self.safe_number(order, 'cost') average = self.safe_number(order, 'average') id = self.safe_string(order, 'id') type = self.safe_string(order, 'type') side = self.safe_string(order, 'side') feeData = self.safe_value(order, 'fee') feeCost = self.safe_number(feeData, 'cost') feeRate = self.safe_number(feeData, 'rate') feeCode = self.safe_string(feeData, 'code') feeCurrency = self.safe_currency_code(feeCode) fee = { 'currency': feeCurrency, 'cost': feeCost, 'rate': feeRate, } return { 'info': order, 'id': id, 'clientOrderId': None, 'timestamp': timestamp, 'datetime': datetime, 'lastTradeTimestamp': lastTradeTimestamp, 'symbol': symbol, 'type': type, 'timeInForce': None, 'postOnly': None, 'side': side, 'price': price, 'stopPrice': None, 
'amount': amount, 'cost': cost, 'average': average, 'filled': filled, 'remaining': remaining, 'status': status, 'fee': fee, 'trades': None, } async def create_order(self, symbol, type, side, amount, price=None, params={}): self.check_required_dependencies() if self.apiKey is None: raise ArgumentsRequired('createOrder() requires self.apiKey or userid in params') await self.load_markets() market = self.market(symbol) sideNum = None typeNum = None if side == 'sell': sideNum = 1 else: sideNum = 2 if type == 'limit': typeNum = 1 else: typeNum = 2 price = 0 normalSymbol = market['normalSymbol'] baseId = market['baseId'] baseCurrency = self.currency(market['base']) amountTruncated = self.amount_to_precision(symbol, amount) amountChain = self.toWei(amountTruncated, baseCurrency['precision']) amountChainString = self.number_to_string(amountChain) quoteId = market['quoteId'] quoteCurrency = self.currency(market['quote']) priceRounded = self.price_to_precision(symbol, price) priceChain = self.toWei(priceRounded, quoteCurrency['precision']) priceChainString = self.number_to_string(priceChain) now = self.milliseconds() expiryDelta = self.safe_integer(self.options, 'orderExpiration', 31536000000) expiration = self.milliseconds() + expiryDelta datetime = self.iso8601(now) datetime = datetime.split('.')[0] expirationDatetime = self.iso8601(expiration) expirationDatetime = expirationDatetime.split('.')[0] defaultDappId = 'Sagittarius' dappId = self.safe_string(params, 'dappId', defaultDappId) defaultFee = self.safe_string(self.options, 'fee', '300000000000000') totalFeeRate = self.safe_string(params, 'totalFeeRate', 8) chainFeeRate = self.safe_string(params, 'chainFeeRate', 1) fee = self.safe_string(params, 'fee', defaultFee) eightBytes = Precise.stringPow('2', '64') allByteStringArray = [ self.number_to_be(1, 32), self.number_to_le(int(math.floor(now / 1000)), 4), self.number_to_le(1, 1), self.number_to_le(int(math.floor(expiration / 1000)), 4), self.number_to_le(1, 1), 
self.number_to_le(32, 1), self.number_to_le(0, 8), self.number_to_le(fee, 8), # string for 32 bit php self.number_to_le(len(self.apiKey), 1), self.encode(self.apiKey), self.number_to_le(sideNum, 1), self.number_to_le(typeNum, 1), self.number_to_le(len(normalSymbol), 1), self.encode(normalSymbol), self.number_to_le(Precise.string_div(amountChainString, eightBytes, 0), 8), self.number_to_le(Precise.string_mod(amountChainString, eightBytes), 8), self.number_to_le(Precise.string_div(priceChainString, eightBytes, 0), 8), self.number_to_le(Precise.string_mod(priceChainString, eightBytes), 8), self.number_to_le(0, 2), self.number_to_le(int(math.floor(now / 1000)), 4), self.number_to_le(int(math.floor(expiration / 1000)), 4), self.number_to_le(1, 1), self.number_to_le(int(chainFeeRate), 2), self.number_to_le(1, 1), self.number_to_le(int(totalFeeRate), 2), self.number_to_le(int(quoteId), 4), self.number_to_le(int(baseId), 4), self.number_to_le(0, 1), self.number_to_le(1, 1), self.number_to_le(len(dappId), 1), self.encode(dappId), self.number_to_le(0, 1), ] txByteStringArray = [ self.number_to_le(int(math.floor(now / 1000)), 4), self.number_to_le(1, 1), self.number_to_le(int(math.floor(expiration / 1000)), 4), self.number_to_le(1, 1), self.number_to_le(32, 1), self.number_to_le(0, 8), self.number_to_le(fee, 8), # string for 32 bit php self.number_to_le(len(self.apiKey), 1), self.encode(self.apiKey), self.number_to_le(sideNum, 1), self.number_to_le(typeNum, 1), self.number_to_le(len(normalSymbol), 1), self.encode(normalSymbol), self.number_to_le(Precise.string_div(amountChainString, eightBytes, 0), 8), self.number_to_le(Precise.string_mod(amountChainString, eightBytes), 8), self.number_to_le(Precise.string_div(priceChainString, eightBytes, 0), 8), self.number_to_le(Precise.string_mod(priceChainString, eightBytes), 8), self.number_to_le(0, 2), self.number_to_le(int(math.floor(now / 1000)), 4), self.number_to_le(int(math.floor(expiration / 1000)), 4), self.number_to_le(1, 1), 
self.number_to_le(int(chainFeeRate), 2), self.number_to_le(1, 1), self.number_to_le(int(totalFeeRate), 2), self.number_to_le(int(quoteId), 4), self.number_to_le(int(baseId), 4), self.number_to_le(0, 1), self.number_to_le(1, 1), self.number_to_le(len(dappId), 1), self.encode(dappId), self.number_to_le(0, 1), ] txbytestring = self.binary_concat_array(txByteStringArray) txidhash = self.hash(txbytestring, 'sha256', 'hex') txid = txidhash[0:40] orderidByteStringArray = [ self.number_to_le(len(txid), 1), self.encode(txid), self.number_to_be(0, 4), ] orderidbytestring = self.binary_concat_array(orderidByteStringArray) orderidhash = self.hash(orderidbytestring, 'sha256', 'hex') orderid = orderidhash[0:40] bytestring = self.binary_concat_array(allByteStringArray) hash = self.hash(bytestring, 'sha256', 'hex') signature = self.ecdsa(hash, self.secret, 'secp256k1', None, True) recoveryParam = self.binary_to_base16(self.number_to_le(self.sum(signature['v'], 31), 1)) mySignature = recoveryParam + signature['r'] + signature['s'] operation = { 'now': datetime, 'expiration': expirationDatetime, 'fee': fee, 'creator': self.apiKey, 'side': sideNum, 'order_type': typeNum, 'market_name': normalSymbol, 'amount': amountChain, 'price': priceChain, 'use_btt_as_fee': False, 'money_id': int(quoteId), 'stock_id': int(baseId), 'custom_no_btt_fee_rate': int(totalFeeRate), 'custom_btt_fee_rate': int(chainFeeRate), } fatty = { 'timestamp': datetime, 'expiration': expirationDatetime, 'operations': [ [ 32, operation, ], ], 'validate_type': 0, 'dapp': dappId, 'signatures': [ mySignature, ], } request = { 'trObj': self.json(fatty), } response = await self.publicPostTransactionCreateorder(request) timestamp = self.milliseconds() statusCode = self.safe_string(response, 'code') status = 'open' if (statusCode == '0') else 'failed' return { 'info': response, 'id': orderid, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': None, 'status': status, 'symbol': None, 'type': 
None, 'side': None, 'price': None, 'amount': None, 'filled': None, 'remaining': None, 'cost': None, 'trades': None, 'fee': None, 'clientOrderId': None, 'average': None, } async def fetch_order(self, id, symbol=None, params={}): if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchOrder() requires self.apiKey or userid argument') await self.load_markets() request = { 'userid': self.apiKey, } market = None if symbol is not None: market = self.markets[symbol] request['symbol'] = market['id'] request['id'] = id response = await self.publicGetOrders(self.extend(request, params)) return self.parse_order(response, market) async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchOpenOrders() requires self.apiKey or userid argument') await self.load_markets() request = { 'userid': self.apiKey, } market = None if symbol is not None: market = self.market(symbol) request['symbol'] = market['id'] if limit is not None: request['limit'] = limit if since is not None: request['since'] = since response = await self.publicGetOrdersOpen(self.extend(request, params)) return self.parse_orders(response, market, since, limit) async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchClosedOrders() requires self.apiKey or userid argument') await self.load_markets() market = None request = { 'userid': self.apiKey, } if symbol is not None: market = self.market(symbol) request['symbol'] = market['id'] if limit is not None: request['limit'] = limit if since is not None: request['since'] = since response = await self.publicGetOrdersClosed(self.extend(request, params)) return self.parse_orders(response, market, since, limit) async def fetch_orders(self, symbol=None, since=None, limit=None, params={}): if not ('userid' in params) and (self.apiKey 
is None): raise ArgumentsRequired('fetchOrders() requires self.apiKey or userid argument') await self.load_markets() market = None request = { 'userid': self.apiKey, } if symbol is not None: market = self.market(symbol) request['symbol'] = market['id'] if limit is not None: request['limit'] = limit if since is not None: request['since'] = since response = await self.publicGetOrdersAll(self.extend(request, params)) return self.parse_orders(response, market, since, limit) async def cancel_order(self, id, symbol=None, params={}): if self.apiKey is None: raise ArgumentsRequired('cancelOrder() requires hasAlreadyAuthenticatedSuccessfully') if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument') await self.load_markets() market = self.market(symbol) baseId = market['baseId'] quoteId = market['quoteId'] normalSymbol = market['normalSymbol'] feeAmount = '300000000000000' now = self.milliseconds() expiration = 0 datetime = self.iso8601(now) datetime = datetime.split('.')[0] expirationDatetime = self.iso8601(expiration) expirationDatetime = expirationDatetime.split('.')[0] defaultDappId = 'Sagittarius' dappId = self.safe_string(params, 'dappId', defaultDappId) byteStringArray = [ self.number_to_be(1, 32), self.number_to_le(int(math.floor(now / 1000)), 4), self.number_to_le(1, 1), self.number_to_le(expiration, 4), self.number_to_le(1, 1), self.number_to_le(33, 1), self.number_to_le(0, 8), self.number_to_le(feeAmount, 8), # string for 32 bit php self.number_to_le(len(self.apiKey), 1), self.encode(self.apiKey), self.number_to_le(len(normalSymbol), 1), self.encode(normalSymbol), self.base16_to_binary(id), self.number_to_le(int(quoteId), 4), self.number_to_le(int(baseId), 4), self.number_to_le(0, 1), self.number_to_le(1, 1), self.number_to_le(len(dappId), 1), self.encode(dappId), self.number_to_le(0, 1), ] bytestring = self.binary_concat_array(byteStringArray) hash = self.hash(bytestring, 'sha256', 'hex') signature = self.ecdsa(hash, 
self.secret, 'secp256k1', None, True) recoveryParam = self.binary_to_base16(self.number_to_le(self.sum(signature['v'], 31), 1)) mySignature = recoveryParam + signature['r'] + signature['s'] operation = { 'fee': feeAmount, 'creator': self.apiKey, 'order_id': id, 'market_name': normalSymbol, 'money_id': int(quoteId), 'stock_id': int(baseId), } fatty = { 'timestamp': datetime, 'expiration': expirationDatetime, 'operations': [ [ 33, operation, ], ], 'validate_type': 0, 'dapp': dappId, 'signatures': [ mySignature, ], } request = { 'trObj': self.json(fatty), } response = await self.publicPostTransactionCancelorder(request) timestamp = self.milliseconds() statusCode = self.safe_string(response, 'code') status = 'canceled' if (statusCode == '0') else 'failed' return { 'info': response, 'id': None, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': None, 'status': status, 'symbol': None, 'type': None, 'side': None, 'price': None, 'amount': None, 'filled': None, 'remaining': None, 'cost': None, 'trades': None, 'fee': None, 'clientOrderId': None, 'average': None, } async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchMyTrades() requires self.apiKey or userid argument') await self.load_markets() market = self.market(symbol) request = { 'userid': self.apiKey, } if symbol is not None: request['symbol'] = market['id'] if limit is not None: request['limit'] = limit if since is not None: request['since'] = since response = await self.publicGetOrdersTrades(self.extend(request, params)) return self.parse_trades(response, market, since, limit) async def fetch_deposits(self, code=None, since=None, limit=None, params={}): await self.load_markets() if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchDeposits() requires self.apiKey or userid argument') currency = None request = { 'userid': self.apiKey, } if code 
is not None: currency = self.currency(code) request['currency'] = currency['id'] if since is not None: request['since'] = since if limit is not None: request['limit'] = limit response = await self.publicGetDeposits(self.extend(request, params)) return self.parse_transactions(response, currency, since, limit) async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}): await self.load_markets() if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchWithdrawals() requires self.apiKey or userid argument') currency = None request = { 'userid': self.apiKey, } if code is not None: currency = self.currency(code) request['currency'] = currency['id'] if since is not None: request['since'] = since if limit is not None: request['limit'] = limit response = await self.publicGetWithdrawals(self.extend(request, params)) return self.parse_transactions(response, currency, since, limit) def parse_transaction_status(self, status): statuses = { 'DEPOSIT_FAILED': 'failed', 'FEE_SEND_FAILED': 'failed', 'FEE_FAILED': 'failed', 'PAY_SEND_FAILED': 'failed', 'PAY_FAILED': 'failed', 'BTT_FAILED': 'failed', 'WITHDDRAW_FAILED': 'failed', 'USER_FAILED': 'failed', 'FEE_EXECUED': 'pending', 'PAY_EXECUED': 'pending', 'WITHDDRAW_EXECUTED': 'pending', 'USER_EXECUED': 'pending', 'BTT_SUCCED': 'ok', } return self.safe_string(statuses, status, status) def parse_transaction(self, transaction, currency=None): id = self.safe_string(transaction, 'id') address = self.safe_string(transaction, 'address') tag = self.safe_string(transaction, 'tag') if tag is not None: if len(tag) < 1: tag = None txid = self.safe_value(transaction, 'txid') currencyId = self.safe_string(transaction, 'code') code = self.safe_currency_code(currencyId, currency) timestamp = self.safe_integer(transaction, 'timestamp') datetime = self.safe_string(transaction, 'datetime') type = self.safe_string(transaction, 'type') status = self.parse_transaction_status(self.safe_string(transaction, 
'status')) amount = self.safe_number(transaction, 'amount') feeInfo = self.safe_value(transaction, 'fee') feeCost = self.safe_number(feeInfo, 'cost') feeCurrencyId = self.safe_string(feeInfo, 'code') feeCode = self.safe_currency_code(feeCurrencyId, currency) fee = { 'cost': feeCost, 'currency': feeCode, } return { 'info': transaction, 'id': id, 'txid': txid, 'timestamp': timestamp, 'datetime': datetime, 'address': address, 'tag': tag, 'type': type, 'amount': amount, 'currency': code, 'status': status, 'updated': None, 'fee': fee, } async def fetch_deposit_address(self, code, params={}): await self.load_markets() if not ('userid' in params) and (self.apiKey is None): raise ArgumentsRequired('fetchDepositAddress() requires self.apiKey or userid argument') currency = self.currency(code) request = { 'userid': self.apiKey, 'code': currency['id'], } response = await self.publicGetDepositaddress(request) address = self.safe_string(response[0], 'address') tag = self.safe_string(response[0], 'tag') chainType = self.safe_string(response[0], 'chainType') self.check_address(address) return { 'currency': code, 'address': address, 'tag': tag, 'chainType': chainType, 'info': response, } def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): url = self.urls['api'][api] url += '/' + path if params: url += '?' + self.urlencode(params) return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody): if code == 503: raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body) if response is None: return # fallback to default error handler if 'code' in response: status = self.safe_string(response, 'code') if status == '1': message = self.safe_string(response, 'msg') feedback = self.id + ' ' + body self.throw_exactly_matched_exception(self.exceptions, message, feedback) raise ExchangeError(feedback)
python/ccxt/async_support/bytetrade.py
44,292
-*- coding: utf-8 -*- PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.mdhow-to-contribute-code new metainfo interface Kline of a symbol Market Depth of a symbol Trade records of a symbol Reference information of trading instrument, including base currency, quote precision, etc. The list of currencies available Get the balance of an account Get the open orders of an account Get the closed orders of an account Get the open and closed orders of an account Get the details of an order of an account Get detail match results Get deposit address Get withdrawals info Get deposit info Get transfer info Post create order transaction to blockchain Post cancel order transaction to blockchain Post withdraw transaction to blockchain Post transfer transaction to blockchain typo on the exchange side, 'vertify' private key signature is incorrect same transaction submited one year in byte-trade.com DEX, request https://api-v2.byte-trade.com/currencies will return currencies, the api doc is https://github.com/Bytetrade/bytetrade-official-api-docs/wiki/rest-apiget-currencies-get-currencys-supported-in-bytetradecom we can see the coin name is none-unique in the result, the coin which code is 18 is the CyberMiles ERC20, and the coin which code is 35 is the CyberMiles main chain, but their name is same. that is because bytetrade is a DEX, supports people create coin with the same name, but the id(code) of coin is unique, so we should use the id or name and id as the identity of coin. For coin name and symbol is same with CCXT, I use name@id as the key of commonCurrencies dict. 
[{ "name": "CMT", currency name, non-unique "code": "18", currency id, unique "type": "crypto", "fullname": "CyberMiles", "active": True, "chainType": "ethereum", "basePrecision": 18, "transferPrecision": 10, "externalPrecision": 18, "chainContractAddress": "0xf85feea2fdd81d51177f6b8f35f0e6734ce45f5f", "limits": { "deposit": { "min": "0", "max": "-1" }, "withdraw": { "min": "0", "max": "-1" } } }, { "name": "CMT", "code": "35", "type": "crypto", "fullname": "CyberMiles", "active": True, "chainType": "cmt", "basePrecision": 18, "transferPrecision": 10, "externalPrecision": 18, "chainContractAddress": "0x0000000000000000000000000000000000000000", "limits": { "deposit": { "min": "1", "max": "-1" }, "withdraw": { "min": "10", "max": "-1" } } } ] The id 126 coin is a special coin whose name on the chain is actually ZAR, but it is changed to ZCN after creation, so it must be changed to ZAR when placing the transaction in the chain default = maximum = 100 [ { "symbol":"68719476706", "name":"ETH/BTC", "base":"2", "quote":"32", "timestamp":1575905991933, "datetime":"2019-12-09T15:39:51.933Z", "high":"0", "low":"0", "open":"0", "close":"0", "last":"0", "change":"0", "percentage":"0", "baseVolume":"0", "quoteVolume":"0" } ] previous day close [ { "symbol":"68719476706", "name":"ETH/BTC", "base":"2", "quote":"32", "timestamp":1575905991933, "datetime":"2019-12-09T15:39:51.933Z", "high":"0", "low":"0", "open":"0", "close":"0", "last":"0", "change":"0", "percentage":"0", "baseVolume":"0", "quoteVolume":"0" } ] [ 1591505760000, "242.7", "242.76", "242.69", "242.76", "0.1892" ] [ [1591505760000,"242.7","242.76","242.69","242.76","0.1892"], [1591505820000,"242.77","242.83","242.7","242.72","0.6378"], [1591505880000,"242.72","242.73","242.61","242.72","0.4141"], ] self.safe_string(trade, 'datetime') default = 100, maximum = 500 string for 32 bit php string for 32 bit php string for 32 bit php fallback to default error handler
4,323
en
0.745309
import pytest import numpy as np from ebbef2p.structure import Structure L = 2 E = 1 I = 1 def test_center_load(): P = 100 M_max = P * L / 4 # maximum moment S_max = P/2 # max shearing force w_max = -P * L ** 3 / (48 * E * I) # max displacement tolerance = 1e-6 #set a tolerance of 0.0001% s = Structure('test') s.add_beam(coord=[0, L], E=E, I=I) s.add_nodal_load(P, L/2, 'fz') s.add_nodal_support({'uz': 0, 'ur': "NaN"}, 0) s.add_nodal_support({'uz': 0, 'ur': "NaN"}, L) s.add_nodes(25) s.add_elements(s.nodes) s.solve(s.build_global_matrix(), s.build_load_vector(), s.get_boudary_conditions()) assert min(s.get_displacements()['vertical_displacements']) == pytest.approx(w_max, rel=tolerance) assert max(s.get_bending_moments()['values']) == pytest.approx(M_max, rel=tolerance) assert max(s.get_shear_forces()['values']) == pytest.approx(S_max, rel=tolerance) def test_uniformly_distributed_load(): q = 10 M_max = q * L ** 2 / 8 # maximum moment S_max = q * L/2 # max shearing force w_max = -5 * q * L ** 4 / (384 * E * I) # max displacement tolerance = 1e-4 #set a tolerance of 0.01% s = Structure('test') s.add_beam(coord=[0, L], E=E, I=I) s.add_distributed_load((q, q), (0, L)) s.add_nodal_support({'uz': 0, 'ur': "NaN"}, 0) s.add_nodal_support({'uz': 0, 'ur': "NaN"}, L) s.add_nodes(200) s.add_elements(s.nodes) s.solve(s.build_global_matrix(), s.build_load_vector(), s.get_boudary_conditions()) assert min(s.get_displacements()['vertical_displacements']) == pytest.approx(w_max, rel=tolerance) assert max(s.get_bending_moments()['values']) == pytest.approx(M_max, rel=tolerance) assert max(s.get_shear_forces()['values']) == pytest.approx(S_max, rel=1e-2)
tests/test_simple supported_beam.py
1,829
maximum moment max shearing force max displacementset a tolerance of 0.0001% maximum moment max shearing force max displacementset a tolerance of 0.01%
153
en
0.403153
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import sys class Hwloc(AutotoolsPackage): """The Hardware Locality (hwloc) software project. The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. """ homepage = "http://www.open-mpi.org/projects/hwloc/" url = "https://download.open-mpi.org/release/hwloc/v2.0/hwloc-2.0.2.tar.gz" list_url = "http://www.open-mpi.org/software/hwloc/" list_depth = 2 git = 'https://github.com/open-mpi/hwloc.git' version('master', branch='master') version('2.0.2', sha256='27dcfe42e3fb3422b72ce48b48bf601c0a3e46e850ee72d9bdd17b5863b6e42c') version('2.0.1', sha256='f1156df22fc2365a31a3dc5f752c53aad49e34a5e22d75ed231cd97eaa437f9d') version('2.0.0', sha256='a0d425a0fc7c7e3f2c92a272ffaffbd913005556b4443e1887d2e1718d902887') version('1.11.11', sha256='74329da3be1b25de8e98a712adb28b14e561889244bf3a8138afe91ab18e0b3a') version('1.11.10', sha256='0a2530b739d9ebf60c4c1e86adb5451a20d9e78f7798cf78d0147cc6df328aac') version('1.11.9', sha256='85b978995b67db0b1a12dd1a73b09ef3d39f8e3cb09f8b9c60cf04633acce46c') version('1.11.8', sha256='8af89b1164a330e36d18210360ea9bb305e19f9773d1c882855d261a13054ea8') version('1.11.7', sha256='ac16bed9cdd3c63bca1fe1ac3de522a1376b1487c4fc85b7b19592e28fd98e26') version('1.11.6', 
sha256='67963f15197e6b551539c4ed95a4f8882be9a16cf336300902004361cf89bdee') version('1.11.5', sha256='da2c780fce9b5440a1a7d1caf78f637feff9181a9d1ca090278cae4bea71b3df') version('1.11.4', sha256='1b6a58049c31ce36aff162cf4332998fd468486bd08fdfe0249a47437311512d') version('1.11.3', sha256='03a1cc63f23fed7e17e4d4369a75dc77d5c145111b8578b70e0964a12712dea0') version('1.11.2', sha256='d11f091ed54c56c325ffca1083113a405fcd8a25d5888af64f5cd6cf587b7b0a') version('1.11.1', sha256='b41f877d79b6026640943d57ef25311299378450f2995d507a5e633da711be61') version('1.9', sha256='9fb572daef35a1c8608d1a6232a4a9f56846bab2854c50562dfb9a7be294f4e8') variant('nvml', default=False, description="Support NVML device discovery") variant('gl', default=False, description="Support GL device discovery") variant('cuda', default=False, description="Support CUDA devices") variant('libxml2', default=True, description="Build with libxml2") variant('pci', default=(sys.platform != 'darwin'), description="Support analyzing devices on PCI bus") variant('shared', default=True, description="Build shared libraries") variant( 'cairo', default=False, description='Enable the Cairo back-end of hwloc\'s lstopo command' ) depends_on('pkgconfig', type='build') depends_on('m4', type='build', when='@master') depends_on('autoconf', type='build', when='@master') depends_on('automake', type='build', when='@master') depends_on('libtool', type='build', when='@master') depends_on('cuda', when='+nvml') depends_on('cuda', when='+cuda') depends_on('gl', when='+gl') depends_on('libpciaccess', when='+pci') depends_on('libxml2', when='+libxml2') depends_on('cairo', when='+cairo') depends_on('numactl', when='@:1.11.11 platform=linux') def url_for_version(self, version): return "http://www.open-mpi.org/software/hwloc/v%s/downloads/hwloc-%s.tar.gz" % (version.up_to(2), version) def configure_args(self): args = [ # Disable OpenCL, since hwloc might pick up an OpenCL # library at build time that is then not found at run time # 
(Alternatively, we could require OpenCL as dependency.) "--disable-opencl", ] if '@2.0.0:' in self.spec: args.append('--enable-netloc') args.extend(self.enable_or_disable('cairo')) args.extend(self.enable_or_disable('nvml')) args.extend(self.enable_or_disable('gl')) args.extend(self.enable_or_disable('cuda')) args.extend(self.enable_or_disable('libxml2')) args.extend(self.enable_or_disable('pci')) args.extend(self.enable_or_disable('shared')) return args
var/spack/repos/builtin/packages/hwloc/package.py
4,815
The Hardware Locality (hwloc) software project. The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. Copyright 2013-2019 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) Disable OpenCL, since hwloc might pick up an OpenCL library at build time that is then not found at run time (Alternatively, we could require OpenCL as dependency.)
1,001
en
0.898575
# System Imports import cv2 import json from typing import Optional # Library imports import numpy # Twisted Import from twisted.internet import reactor, defer, threads, protocol from twisted.internet.endpoints import TCP4ClientEndpoint from twisted.internet.interfaces import IAddress # Package Imports from .data import Image, ColorSpace class cv_webcam (object): def __init__ (self, device, img_width, img_height): self.device_index = device self.img_width = img_width self.img_height = img_height self.name = "cv_webcam(%s)" % device self.camera = None @defer.inlineCallbacks def connect (self, _protocolFactory): if self.camera is None: self.camera = yield threads.deferToThread(cv2.VideoCapture, self.device_index) # Set picture capture dimensions self.camera.set(3, self.img_width) self.camera.set(4, self.img_height) defer.returnValue(self) @defer.inlineCallbacks def image (self): """ Get an image from the camera. Returns an Image object. """ try: flag, img_array = yield threads.deferToThread(self.camera.read) except SystemError: return if flag is False: print ("No image") return defer.returnValue(Image(img_array, ColorSpace.BGR)) def disconnect (self): threads.deferToThread(self.camera.release) class _camera_proxy_protocol (protocol.Protocol): _state: str _buffer: bytes = b'' _image_callback: Optional[defer.Deferred] = None _camera_id: Optional[bytes] = None def setCameraId(self, camera_id: int): self._camera_id = str(camera_id).encode() self.requestFormat() # def connectionMade(self): # if self._camera_id is not None: # self.requestFormat() def dataReceived(self, data: bytes): """ Byte 1: command Byte 2-5: length Byte 6+: data """ self._buffer += data if len(self._buffer) > 5: command = chr(self._buffer[0]) length = int.from_bytes(self._buffer[1:5], byteorder = 'big') if len(self._buffer) >= length + 5: data = self._buffer[5 : 5 + length] self._buffer = self._buffer[5 + length : ] if command == 'F': self.formatReceived(data) elif command == 'I': 
self.imageReceived(data) def formatReceived (self, data: bytes): image_format = json.loads(data.decode()) if image_format['channels'] == 1: self._image_shape = (image_format['height'], image_format['width']) else: self._image_shape = ( image_format['height'], image_format['width'], image_format['channels'] ) self._image_colorspace = image_format['colorspace'] def imageReceived (self, data: bytes): try: img_data = numpy.reshape( numpy.frombuffer(data, dtype = numpy.uint8), newshape = self._image_shape ) self._image_callback.callback(img_data) except (AttributeError, defer.AlreadyCalledError) as e: # No callback, or callback already done. (Unexpected image data). pass except Exception as e: try: self._image_callback.errback(e) except defer.AlreadyCalledError: pass def requestFormat (self): self.transport.write(b'F' + self._camera_id + b'\n') def requestImage (self): self._image_callback = defer.Deferred() self.transport.write(b'I' + self._camera_id + b'\n') return self._image_callback class camera_proxy (object): def __init__ (self, host, port, camera_id): self.point = TCP4ClientEndpoint(reactor, host, port) self.name = f"camera_proxy({host!s}, {port!s})" self.camera_id = camera_id @defer.inlineCallbacks def connect (self, _protocolFactory): self._protocol = yield self.point.connect( protocol.Factory.forProtocol(_camera_proxy_protocol) ) self._protocol.setCameraId(self.camera_id) # yield self._protocol._get_format_information() defer.returnValue(self) @defer.inlineCallbacks def image (self): """ Get an image from the camera. Returns a SimpleCV Image. """ try: img_array = yield self._protocol.requestImage() except Exception as e: print('Exception fetching image', e) return defer.returnValue(Image(img_array, ColorSpace.BGR)) def disconnect (self): threads.deferToThread(self.camera.release)
src/octopus/image/source.py
4,903
Byte 1: command Byte 2-5: length Byte 6+: data Get an image from the camera. Returns an Image object. Get an image from the camera. Returns a SimpleCV Image. System Imports Library imports Twisted Import Package Imports Set picture capture dimensions def connectionMade(self): if self._camera_id is not None: self.requestFormat() No callback, or callback already done. (Unexpected image data). yield self._protocol._get_format_information()
456
en
0.522363
import numpy as np import logging import unittest import os import scipy.linalg as LA import time from sklearn.utils import safe_sqr, check_array from scipy import stats from pysnptools.snpreader import Bed,Pheno from pysnptools.snpreader import SnpData,SnpReader from pysnptools.kernelreader import KernelNpz from pysnptools.kernelreader import SnpKernel from pysnptools.kernelreader import KernelReader from pysnptools.kernelreader import Identity as KernelIdentity import pysnptools.util as pstutil from pysnptools.standardizer import DiagKtoN,UnitTrained from pysnptools.standardizer import Unit from pysnptools.util import intersect_apply from pysnptools.standardizer import Standardizer from fastlmm.inference.lmm import LMM from fastlmm.inference.fastlmm_predictor import _pheno_fixup from fastlmm.inference import FastLMM from pysnptools.standardizer import Identity as StandardizerIdentity from scipy.stats import multivariate_normal from fastlmm.util.pickle_io import load, save # make FastLmm use this when there are no SNPs or K is Identity? class LinearRegression(object): ''' A linear regression predictor, that works like the FastLMM in fastlmm_predictor.py, but that expects all similarity matrices to be identity. **Constructor:** :Parameters: * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. 
Fills missing with zero) and :class:`Standardizer.Identity` (do nothing) :Example: >>> import numpy as np >>> import logging >>> from pysnptools.snpreader import Pheno >>> from fastlmm.inference import LinearRegression >>> logging.basicConfig(level=logging.INFO) >>> cov = Pheno("../feature_selection/examples/toydata.cov") >>> pheno_fn = "../feature_selection/examples/toydata.phe" >>> train_idx = np.r_[10:cov.iid_count] # iids 10 and on >>> test_idx = np.r_[0:10] # the first 10 iids >>> linreg = LinearRegression() >>> #We give it phenotype information for extra examples, but it reorders and intersects the examples, so only training examples are used. >>> _ = linreg.fit(X=cov[train_idx,:],y=pheno_fn) >>> mean, covariance = linreg.predict(X=cov[test_idx,:]) >>> print mean.iid[0], round(mean.val[0],7), round(covariance.val[0,0],7) ['per0' 'per0'] 0.1518764 0.9043703 >>> nll = linreg.score(X=cov[test_idx,:],y=pheno_fn) >>> print round(nll,7) 13.6688448 ''' def __init__(self,covariate_standardizer=Unit()): self.covariate_standardizer = covariate_standardizer self.is_fitted = False def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2=None, mixing=None,count_A1=None): """ Method for training a :class:`FastLMM` predictor. If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected. :param X: training covariate information, optional: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param y: training phenotype: If you give a string, it should be the file name of a PLINK phenotype-formatted file. 
:type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param K0_train: Must be None. Represents the identity similarity matrix. :type K0_train: None :param K1_train: Must be None. Represents the identity similarity matrix. :type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__ :param h2: Ignored. Optional. :type h2: number :param mixing: Ignored. Optional. :type mixing: number :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1 alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True. :type count_A1: bool :rtype: self, the fitted Linear Regression predictor """ self.is_fitted = True assert K0_train is None # could also accept that ID or no snps assert K1_train is None # could also accept that ID or no snps assert y is not None, "y must be given" y = _pheno_fixup(y,count_A1=count_A1) assert y.sid_count == 1, "Expect y to be just one variable" X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1) X, y = intersect_apply([X, y]) y = y.read() X, covar_unit_trained = X.read().standardize(self.covariate_standardizer,return_trained=True) # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset X = SnpData(iid=X.iid, sid=FastLMM._new_snp_name(X), val=np.c_[X.val,np.ones((X.iid_count,1))]) lsqSol = np.linalg.lstsq(X.val, y.val[:,0],rcond=-1) bs=lsqSol[0] #weights r2=lsqSol[1] #squared residuals D=lsqSol[2] #rank of design matrix N=y.iid_count self.beta = bs self.ssres = float(r2) self.sstot = ((y.val-y.val.mean())**2).sum() self.covar_unit_trained = 
covar_unit_trained self.iid_count = X.iid_count self.covar_sid = X.sid self.pheno_sid = y.sid return self def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None,count_A1=None): """ Method for predicting from a fitted :class:`FastLMM` predictor. If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected. :param X: testing covariate information, optional: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param K0_whole_test: Must be None. Represents the identity similarity matrix. :type K0_whole_test: None :param K1_whole_test: Must be None. Represents the identity similarity matrix. :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__ :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided. :type iid_if_none: an ndarray of two strings :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1 alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True. 
:type count_A1: bool :rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance """ assert self.is_fitted, "Can only predict after predictor has been fitted" assert K0_whole_test is None or isinstance(K0_whole_test,KernelIdentity) # could also accept no snps assert K1_whole_test is None or isinstance(K1_whole_test,KernelIdentity) # could also accept no snps X = _pheno_fixup(X,iid_if_none=iid_if_none,count_A1=count_A1) X = X.read().standardize(self.covar_unit_trained) # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset X = SnpData(iid=X.iid, sid=FastLMM._new_snp_name(X), val=np.c_[X.read().val,np.ones((X.iid_count,1))]) assert np.array_equal(X.sid,self.covar_sid), "Expect covar sids to be the same in train and test." pheno_predicted = X.val.dot(self.beta).reshape(-1,1) ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name="linear regression Prediction") #!!!replace 'parent_string' with 'name' from pysnptools.kernelreader import KernelData ret1 = KernelData(iid=X.iid,val=np.eye(X.iid_count)* self.ssres / self.iid_count) return ret0, ret1 def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, count_A1=None): """ Method for calculating the negative log likelihood of testing examples. If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected. :param X: testing covariate information, optional: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. 
:param y: testing phenotype: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param K0_whole_test: Must be None. Represents the identity similarity matrix. :type K0_whole_test: None :param K1_whole_test: Must be None. Represents the identity similarity matrix. :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__ :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided. :type iid_if_none: an ndarray of two strings :param return_mse_too: If true, will also return the mean squared error. :type return_mse_too: bool :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1 alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True. :type count_A1: bool :rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error. 
""" mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1) y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1) mean, covar, y = intersect_apply([mean0, covar0, y]) var = multivariate_normal(mean=mean.read(order='A',view_ok=True).val.reshape(-1), cov=covar.read(order='A',view_ok=True).val) y_actual = y.read().val nll = -np.log(var.pdf(y_actual.reshape(-1))) if not return_mse_too: return nll else: mse = ((y_actual-mean)**2).sum() return nll, mse """ Created on 2013-08-02 @author: Christian Widmer <chris@shogun-toolbox.org> @summary: Module for univariate feature selection in the presence of covariates Motivated by sklearn's linear regression method for feature selection, we've come up with an extended version that takes care of covariates based on sklearn code (f_regression): https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/univariate_selection.py """ #def get_example_data(): # """ # load plink files # """ # import fastlmm.pyplink.plink as plink # import pysnptools.snpreader.bed as Bed # import fastlmm.util.util as util # ipheno = 0 # foldIter = 0 # """ # import dataset # dat = dataset.importDataset("pheno4") # fn_bed = dat["bedFile"] # fn_pheno = dat["phenoFile"] # """ # fn_bed = "../featureSelection/examples/toydata" # fn_pheno = "../feature_selection/examples/toydata.phe" # import pysnptools.util.pheno as pstpheno # pheno = pstpheno.loadPhen(fn_pheno) # # load data # bed = plink.Bed(fn_bed) # indarr = util.intersect_ids([pheno['iid'],bed.iid]) # pheno['iid'] = pheno['iid'][indarr[:,0]] # pheno['vals'] = pheno['vals'][indarr[:,0]] # bed = bed[indarr[:,1],:] # N = pheno['vals'].shape[0] # y = pheno['vals'][:,ipheno] # iid = pheno['iid'] # snps = bed.read().standardize() # return snps, y def f_regression_block(fun,X,y,blocksize=None,**args): """ runs f_regression for each block separately (saves memory). 
------------------------- fun : method that returns statistics,pval X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix blocksize : number of SNPs per block """ if blocksize==None: return fun(X,y,**args) idx_start = 0 idx_stop = int(blocksize) pval = np.zeros(X.shape[1]) stats = np.zeros(X.shape[1]) while idx_start<X.shape[1]: stats[idx_start:idx_stop], pval[idx_start:idx_stop] = fun(X[:,idx_start:idx_stop],y,**args) idx_start = idx_stop idx_stop += blocksize if idx_stop>X.shape[1]: idx_stop = X.shape[1] return stats,pval def f_regression_cov_alt(X, y, C): """ Implementation as derived in tex document See pg 12 of following document for definition of F-statistic http://www-stat.stanford.edu/~jtaylo/courses/stats191/notes/simple_diagnostics.pdf Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix c : {array-like, sparse matrix} shape = (n_samples, n_covariates) The set of covariates. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. 
""" # make sure we don't overwrite input data old_flag_X = X.flags.writeable old_flag_C = C.flags.writeable old_flag_y = y.flags.writeable X.flags.writeable = False C.flags.writeable = False y.flags.writeable = False #X, C, y = check_array(X, C, y, dtype=np.float) y = y.ravel() # make copy of input data X = X.copy(order="F") y = y.copy() assert C.shape[1] < C.shape[0] cpinv = np.linalg.pinv(C) X -= np.dot(C,(np.dot(cpinv, X))) #most expensive line (runtime) y -= np.dot(C,(np.dot(cpinv, y))) yS = safe_sqr(y.T.dot(X)) # will create a copy # Note: (X*X).sum(0) = X.T.dot(X).diagonal(), computed efficiently # see e.g.: http://stackoverflow.com/questions/14758283/is-there-a-numpy-scipy-dot-product-calculating-only-the-diagonal-entries-of-the # TODO: make this smarter using either stride tricks or cython X *= X denom = X.sum(0) * y.T.dot(y) - yS F = yS / denom # degrees of freedom dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm)) F *= dof # convert to p-values pv = stats.f.sf(F, 1, dof) # restore old state X.flags.writeable = old_flag_X C.flags.writeable = old_flag_C y.flags.writeable = old_flag_y return F, pv def f_regression_cov(X, y, C): """Univariate linear regression tests Quick linear model for testing the effect of a single regressor, sequentially for many regressors. This is done in 3 steps: 1. the regressor of interest and the data are orthogonalized wrt constant regressors 2. the cross correlation between data and regressors is computed 3. it is converted to an F score then to a p-value Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix c : {array-like, sparse matrix} shape = (n_samples, n_covariates) The set of covariates. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. 
""" X = check_array(X, dtype=np.float) C = check_array(C, dtype=np.float) y = check_array(y, dtype=np.float) y = y.ravel() assert C.shape[1] < C.shape[0] cpinv = np.linalg.pinv(C) X -= np.dot(C,(np.dot(cpinv, X))) y -= np.dot(C,(np.dot(cpinv, y))) # compute the correlation corr = np.dot(y, X) corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel() corr /= np.asarray(np.sqrt(safe_sqr(y).sum())).ravel() # convert to p-value dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm)) F = corr ** 2 / (1 - corr ** 2) * dof pv = stats.f.sf(F, 1, dof) return F, pv def test_bias(): """ make sure we get the same result for setting C=unitvec """ S, y = get_example_data() C = np.ones((len(y),1)) from sklearn.feature_selection import f_regression F1, pval1 = f_regression(S, y, center=True) F2, pval2 = f_regression_cov(S, C, y) F3, pval3 = f_regression_cov_alt(S, C, y) # make sure values are the same np.testing.assert_array_almost_equal(F1, F2) np.testing.assert_array_almost_equal(F2, F3) np.testing.assert_array_almost_equal(pval1, pval2) np.testing.assert_array_almost_equal(pval2, pval3) def test_cov(): """ compare different implementations, make sure results are the same """ S, y = get_example_data() C = S[:,0:10] S = S[:,10:] F1, pval1 = f_regression_cov(S, C, y) F2, pval2 = f_regression_cov_alt(S, C, y) np.testing.assert_array_almost_equal(F1, F2) np.testing.assert_array_almost_equal(pval1, pval2) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) import doctest doctest.testmod() #test_cov() #test_bias()
fastlmm/inference/linear_regression.py
19,437
A linear regression predictor, that works like the FastLMM in fastlmm_predictor.py, but that expects all similarity matrices to be identity. **Constructor:** :Parameters: * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing) :Example: >>> import numpy as np >>> import logging >>> from pysnptools.snpreader import Pheno >>> from fastlmm.inference import LinearRegression >>> logging.basicConfig(level=logging.INFO) >>> cov = Pheno("../feature_selection/examples/toydata.cov") >>> pheno_fn = "../feature_selection/examples/toydata.phe" >>> train_idx = np.r_[10:cov.iid_count] # iids 10 and on >>> test_idx = np.r_[0:10] # the first 10 iids >>> linreg = LinearRegression() >>> #We give it phenotype information for extra examples, but it reorders and intersects the examples, so only training examples are used. >>> _ = linreg.fit(X=cov[train_idx,:],y=pheno_fn) >>> mean, covariance = linreg.predict(X=cov[test_idx,:]) >>> print mean.iid[0], round(mean.val[0],7), round(covariance.val[0,0],7) ['per0' 'per0'] 0.1518764 0.9043703 >>> nll = linreg.score(X=cov[test_idx,:],y=pheno_fn) >>> print round(nll,7) 13.6688448 runs f_regression for each block separately (saves memory). ------------------------- fun : method that returns statistics,pval X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix blocksize : number of SNPs per block Univariate linear regression tests Quick linear model for testing the effect of a single regressor, sequentially for many regressors. This is done in 3 steps: 1. the regressor of interest and the data are orthogonalized wrt constant regressors 2. the cross correlation between data and regressors is computed 3. 
it is converted to an F score then to a p-value Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix c : {array-like, sparse matrix} shape = (n_samples, n_covariates) The set of covariates. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. Implementation as derived in tex document See pg 12 of following document for definition of F-statistic http://www-stat.stanford.edu/~jtaylo/courses/stats191/notes/simple_diagnostics.pdf Parameters ---------- X : {array-like, sparse matrix} shape = (n_samples, n_features) The set of regressors that will tested sequentially. y : array of shape(n_samples). The data matrix c : {array-like, sparse matrix} shape = (n_samples, n_covariates) The set of covariates. Returns ------- F : array, shape=(n_features,) F values of features. pval : array, shape=(n_features,) p-values of F-scores. Method for training a :class:`FastLMM` predictor. If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected. :param X: training covariate information, optional: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param y: training phenotype: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param K0_train: Must be None. 
Represents the identity similarity matrix. :type K0_train: None :param K1_train: Must be None. Represents the identity similarity matrix. :type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__ :param h2: Ignored. Optional. :type h2: number :param mixing: Ignored. Optional. :type mixing: number :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1 alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True. :type count_A1: bool :rtype: self, the fitted Linear Regression predictor Method for predicting from a fitted :class:`FastLMM` predictor. If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected. :param X: testing covariate information, optional: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param K0_whole_test: Must be None. Represents the identity similarity matrix. :type K0_whole_test: None :param K1_whole_test: Must be None. Represents the identity similarity matrix. :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__ :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided. 
:type iid_if_none: an ndarray of two strings :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1 alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True. :type count_A1: bool :rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance Method for calculating the negative log likelihood of testing examples. If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected. :param X: testing covariate information, optional: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param y: testing phenotype: If you give a string, it should be the file name of a PLINK phenotype-formatted file. :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string. :param K0_whole_test: Must be None. Represents the identity similarity matrix. :type K0_whole_test: None :param K1_whole_test: Must be None. Represents the identity similarity matrix. :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__ :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided. :type iid_if_none: an ndarray of two strings :param return_mse_too: If true, will also return the mean squared error. 
:type return_mse_too: bool :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1 alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True. :type count_A1: bool :rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error. make sure we get the same result for setting C=unitvec compare different implementations, make sure results are the same make FastLmm use this when there are no SNPs or K is Identity? could also accept that ID or no snps could also accept that ID or no snps add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offsetweightssquared residualsrank of design matrix could also accept no snps could also accept no snps add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset!!!replace 'parent_string' with 'name'def get_example_data(): """ load plink files """ import fastlmm.pyplink.plink as plink import pysnptools.snpreader.bed as Bed import fastlmm.util.util as util ipheno = 0 foldIter = 0 """ import dataset dat = dataset.importDataset("pheno4") fn_bed = dat["bedFile"] fn_pheno = dat["phenoFile"] """ fn_bed = "../featureSelection/examples/toydata" fn_pheno = "../feature_selection/examples/toydata.phe" import pysnptools.util.pheno as pstpheno pheno = pstpheno.loadPhen(fn_pheno) load data bed = plink.Bed(fn_bed) indarr = util.intersect_ids([pheno['iid'],bed.iid]) pheno['iid'] = pheno['iid'][indarr[:,0]] pheno['vals'] = pheno['vals'][indarr[:,0]] bed = bed[indarr[:,1],:] N = pheno['vals'].shape[0] y = pheno['vals'][:,ipheno] iid = pheno['iid'] snps = bed.read().standardize() return snps, y make sure we don't overwrite input dataX, C, y = check_array(X, C, y, dtype=np.float) make copy of input datamost expensive line (runtime) will create a copy Note: (X*X).sum(0) = X.T.dot(X).diagonal(), computed 
efficiently see e.g.: http://stackoverflow.com/questions/14758283/is-there-a-numpy-scipy-dot-product-calculating-only-the-diagonal-entries-of-the TODO: make this smarter using either stride tricks or cython degrees of freedom(df_fm / (df_rm - df_fm)) convert to p-values restore old state compute the correlation convert to p-value(df_fm / (df_rm - df_fm)) make sure values are the sametest_cov()test_bias()
10,422
en
0.656375
# -*- coding: utf-8 -*- # # Copyright (C) 2019 CERN. # Copyright (C) 2019 Northwestern University. # # Invenio-RDM-Records is free software; you can redistribute it and/or modify # it under the terms of the MIT License; see LICENSE file for more details. """Fake demo records.""" import datetime import json import random from pathlib import Path from edtf.parser.grammar import level0Expression from faker import Faker from invenio_access.permissions import system_identity from invenio_rdm_records.fixtures import VocabulariesFixture class CachedVocabularies: """Singleton to store some vocabulary entries. This is needed because otherwise expensive random picking would have to be done for every call to create_fake_record(). Even then, we shouldn't load all vocabularies' entries in memory (at least not big ones). """ _resource_type_ids = [] _subject_ids = [] @classmethod def _read_vocabulary(cls, vocabulary): dir_ = Path(__file__).parent return VocabulariesFixture( system_identity, [Path("./app_data"), dir_ / "data"], "vocabularies.yaml", ).get_records_by_vocabulary(vocabulary) @classmethod def fake_resource_type(cls): """Generate a random resource_type.""" if not cls._resource_type_ids: cls._resource_type_ids = [] dir_ = Path(__file__).parent res_types = cls._read_vocabulary("resource_types") for res in res_types: cls._resource_type_ids.append(res["id"]) random_id = random.choice(cls._resource_type_ids) return {"id": random_id} @classmethod def fake_subjects(cls): """Generate random subjects.""" if not cls._subject_ids: subjects = cls._read_vocabulary("subjects") for subj in subjects: cls._subject_ids.append(subj["id"]) if not cls._subject_ids: return [] n = random.choice([0, 1, 2]) random_ids = random.sample(cls._subject_ids, n) return [{"id": i} for i in random_ids] @classmethod def fake_language(cls): """Generate a random resource_type.""" random_id = random.choice(["eng", "aah", "aag"]) return {"id": random_id} def fake_edtf_level_0(): """Generates a fake 
publication_date string.""" def fake_date(end_date=None): fake = Faker() date_pattern = ['%Y', '%m', '%d'] # make it less and less likely to get less and less parts of the date if random.choice([True, False]): date_pattern.pop() if random.choice([True, False]): date_pattern.pop() return fake.date("-".join(date_pattern), end_datetime=end_date) f_date = fake_date() # if interval if random.choice([True, False]): # get f_date as date object parser = level0Expression("level0") parsed_date = parser.parseString(f_date)[0] date_tuple = parsed_date.lower_strict()[:3] f_date_object = datetime.date(*date_tuple) interval_start = fake_date(end_date=f_date_object) return "/".join([interval_start, f_date]) return f_date def create_fake_record(): """Create records for demo purposes.""" fake = Faker() data_to_use = { "access": { "record": "public", "files": "public", }, "files": { "enabled": False, }, "pids": { }, "metadata": { "resource_type": CachedVocabularies.fake_resource_type(), "creators": [{ "person_or_org": { "family_name": fake.last_name(), "given_name": fake.first_name(), "type": "personal", "identifiers": [{ "scheme": "orcid", "identifier": "0000-0002-1825-0097", }], }, "affiliations": [{ "name": fake.company(), "identifiers": [{ "scheme": "ror", "identifier": "03yrm5c26", }] }] } for i in range(4)], "title": fake.company() + "'s gallery", "additional_titles": [{ "title": "a research data management platform", "type": "subtitle", "lang": "eng" }, { "title": fake.company() + "'s gallery", "type": "alternativetitle", "lang": "eng" }], "publisher": "InvenioRDM", "publication_date": fake_edtf_level_0(), "subjects": CachedVocabularies.fake_subjects(), "contributors": [{ "person_or_org": { "family_name": fake.last_name(), "given_name": fake.first_name(), "type": "personal", }, "affiliations": [{ "name": fake.company(), "identifiers": [{ "scheme": "ror", "identifier": "03yrm5c26", }] }], "role": "rightsholder" } for i in range(3)], # "dates": [{ # # No end date to avoid 
computations based on start # "date": fake.date(pattern='%Y-%m-%d'), # "description": "Random test date", # "type": "other" # }], "languages": [CachedVocabularies.fake_language()], # "related_identifiers": [{ # "identifier": "10.9999/rdm.9999988", # "scheme": "doi", # "relation_type": "requires", # "resource_type": fake_resource_type() # }], "sizes": [ "11 pages" ], "formats": [ "application/pdf" ], "version": "v0.0.1", # "rights": [{ # "rights": "Berkeley Software Distribution 3", # "uri": "https://opensource.org/licenses/BSD-3-Clause", # "identifier": "03yrm5c26", # "scheme": "ror", # }], "description": fake.text(max_nb_chars=3000), "additional_descriptions": [{ "description": fake.text(max_nb_chars=200), "type": "methods", "lang": "eng" } for i in range(2)], "funding": [{ "funder": { "name": "European Commission", "identifier": "03yrm5c26", "scheme": "ror" }, "award": { "title": "OpenAIRE", "number": "246686", "identifier": "0000-0002-1825-0097", "scheme": "orcid" } }], # "locations": [{ # 'geometry': { # 'type': 'Point', # 'coordinates': [ # float(fake.latitude()), float(fake.longitude()) # ] # }, # "place": fake.location_on_land()[2], # "description": "Random place on land...", # 'identifiers': [{ # 'scheme': 'ror', # 'identifier': '03yrm5c26', # }, { # 'scheme': 'orcid', # 'identifier': '0000-0002-1825-0097', # }] # }, { # 'geometry': { # 'type': 'MultiPoint', # 'coordinates': [ # [float(fake.latitude()), float(fake.longitude())], # [float(fake.latitude()), float(fake.longitude())] # ] # }, # "place": fake.location_on_land()[2], # } # ], "references": [{ "reference": "Reference to something et al.", "identifier": "0000000114559647", "scheme": "isni" }], "identifiers": [{ "identifier": "ark:/123/456", "scheme": "ark" }], } } return json.loads(json.dumps(data_to_use))
invenio_rdm_records/fixtures/demo.py
8,539
Singleton to store some vocabulary entries. This is needed because otherwise expensive random picking would have to be done for every call to create_fake_record(). Even then, we shouldn't load all vocabularies' entries in memory (at least not big ones). Create records for demo purposes. Generates a fake publication_date string. Generate a random resource_type. Generate a random resource_type. Generate random subjects. Fake demo records. -*- coding: utf-8 -*- Copyright (C) 2019 CERN. Copyright (C) 2019 Northwestern University. Invenio-RDM-Records is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. make it less and less likely to get less and less parts of the date if interval get f_date as date object "dates": [{ No end date to avoid computations based on start "date": fake.date(pattern='%Y-%m-%d'), "description": "Random test date", "type": "other" }], "related_identifiers": [{ "identifier": "10.9999/rdm.9999988", "scheme": "doi", "relation_type": "requires", "resource_type": fake_resource_type() }], "rights": [{ "rights": "Berkeley Software Distribution 3", "uri": "https://opensource.org/licenses/BSD-3-Clause", "identifier": "03yrm5c26", "scheme": "ror", }], "locations": [{ 'geometry': { 'type': 'Point', 'coordinates': [ float(fake.latitude()), float(fake.longitude()) ] }, "place": fake.location_on_land()[2], "description": "Random place on land...", 'identifiers': [{ 'scheme': 'ror', 'identifier': '03yrm5c26', }, { 'scheme': 'orcid', 'identifier': '0000-0002-1825-0097', }] }, { 'geometry': { 'type': 'MultiPoint', 'coordinates': [ [float(fake.latitude()), float(fake.longitude())], [float(fake.latitude()), float(fake.longitude())] ] }, "place": fake.location_on_land()[2], } ],
1,992
en
0.587955
# Generated by Django 3.2.5 on 2021-07-09 16:58 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0009_auto_20210709_1606'), ] operations = [ migrations.AlterField( model_name='account', name='conf_label', field=models.SmallIntegerField(choices=[(4, 'Top Secret'), (3, 'Secret'), (2, 'Confidential'), (1, 'Unclassified')], default=1, verbose_name='Confidentiality Label'), ), migrations.AlterField( model_name='account', name='id', field=models.CharField(default=4044705356, editable=False, max_length=10, primary_key=True, serialize=False), ), migrations.AlterField( model_name='account', name='integrity_label', field=models.SmallIntegerField(choices=[(4, 'Very Trusted'), (3, 'Trusted'), (2, 'Slightly Trusted'), (1, 'Untrusted')], default=1, verbose_name='Integrity Label'), ), migrations.AlterField( model_name='historicalaccount', name='conf_label', field=models.SmallIntegerField(choices=[(4, 'Top Secret'), (3, 'Secret'), (2, 'Confidential'), (1, 'Unclassified')], default=1, verbose_name='Confidentiality Label'), ), migrations.AlterField( model_name='historicalaccount', name='id', field=models.CharField(db_index=True, default=4044705356, editable=False, max_length=10), ), migrations.AlterField( model_name='historicalaccount', name='integrity_label', field=models.SmallIntegerField(choices=[(4, 'Very Trusted'), (3, 'Trusted'), (2, 'Slightly Trusted'), (1, 'Untrusted')], default=1, verbose_name='Integrity Label'), ), ]
backend/accounts/migrations/0010_auto_20210709_1658.py
1,836
Generated by Django 3.2.5 on 2021-07-09 16:58
45
en
0.759278
# -*- coding: utf-8 -*- ############################################################################### # # ListMembers # Retrieves the email addresses of members of a MailChimp list. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class ListMembers(Choreography): def __init__(self, temboo_session): """ Create a new instance of the ListMembers Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(ListMembers, self).__init__(temboo_session, '/Library/MailChimp/ListMembers') def new_input_set(self): return ListMembersInputSet() def _make_result_set(self, result, path): return ListMembersResultSet(result, path) def _make_execution(self, session, exec_id, path): return ListMembersChoreographyExecution(session, exec_id, path) class ListMembersInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the ListMembers Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_APIKey(self, value): """ Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.) 
""" super(ListMembersInputSet, self)._set_input('APIKey', value) def set_Limit(self, value): """ Set the value of the Limit input for this Choreo. ((optional, integer) Specifies the number of records in a page to be returned. Must be greater than zero and less than or equal to 15000. Defaults to 100.) """ super(ListMembersInputSet, self)._set_input('Limit', value) def set_ListId(self, value): """ Set the value of the ListId input for this Choreo. ((required, string) The id of the Mailchimp list to retrieve members from.) """ super(ListMembersInputSet, self)._set_input('ListId', value) def set_ResponseFormat(self, value): """ Set the value of the ResponseFormat input for this Choreo. ((optional, string) Indicates the desired format for the response. Accepted values are "json" or "xml" (the default).) """ super(ListMembersInputSet, self)._set_input('ResponseFormat', value) def set_Since(self, value): """ Set the value of the Since input for this Choreo. ((optional, date) Retrieves records that have changed since this date/time. Formatted like 'YYYY-MM-DD HH:MM:SS.) """ super(ListMembersInputSet, self)._set_input('Since', value) def set_Start(self, value): """ Set the value of the Start input for this Choreo. ((optional, integer) Specifies the page at which to begin returning records. Page size is defined by the limit argument. Must be zero or greater. Defaults to 0.) """ super(ListMembersInputSet, self)._set_input('Start', value) def set_Status(self, value): """ Set the value of the Status input for this Choreo. ((optional, string) Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'updated'. Defaults to 'subscribed'.) """ super(ListMembersInputSet, self)._set_input('Status', value) class ListMembersResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the ListMembers Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. 
""" def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Corresponds to the format specified in the ResponseFormat parameter. Defaults to "xml".) """ return self._output.get('Response', None) class ListMembersChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return ListMembersResultSet(response, path)
temboo/Library/MailChimp/ListMembers.py
4,839
An InputSet with methods appropriate for specifying the inputs to the ListMembers Choreo. The InputSet object is used to specify input parameters when executing this Choreo. A ResultSet with methods tailored to the values returned by the ListMembers Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. Create a new instance of the ListMembers Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. Retrieve the value for the "Response" output from this Choreo execution. (The response from Mailchimp. Corresponds to the format specified in the ResponseFormat parameter. Defaults to "xml".) Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Mailchimp.) Set the value of the Limit input for this Choreo. ((optional, integer) Specifies the number of records in a page to be returned. Must be greater than zero and less than or equal to 15000. Defaults to 100.) Set the value of the ListId input for this Choreo. ((required, string) The id of the Mailchimp list to retrieve members from.) Set the value of the ResponseFormat input for this Choreo. ((optional, string) Indicates the desired format for the response. Accepted values are "json" or "xml" (the default).) Set the value of the Since input for this Choreo. ((optional, date) Retrieves records that have changed since this date/time. Formatted like 'YYYY-MM-DD HH:MM:SS.) Set the value of the Start input for this Choreo. ((optional, integer) Specifies the page at which to begin returning records. Page size is defined by the limit argument. Must be zero or greater. Defaults to 0.) Set the value of the Status input for this Choreo. ((optional, string) Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'updated'. Defaults to 'subscribed'.) -*- coding: utf-8 -*- ListMembers Retrieves the email addresses of members of a MailChimp list. Python versions 2.6, 2.7, 3.x Copyright 2014, Temboo Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
2,499
en
0.708699
#!/usr/bin/env python """ Script which takes one or more file paths and reports on their detected encodings Example:: % chardetect somefile someotherfile somefile: windows-1252 with confidence 0.5 someotherfile: ascii with confidence 1.0 If no paths are provided, it takes its input from stdin. """ # Copyright (c) 2020 # Author: xiaoweixiang from __future__ import absolute_import, print_function, unicode_literals import argparse import sys from pip._vendor.chardet import __version__ from pip._vendor.chardet.compat import PY2 from pip._vendor.chardet.universaldetector import UniversalDetector def description_of(lines, name='stdin'): """ Return a string describing the probable encoding of a file or list of strings. :param lines: The lines to get the encoding of. :type lines: Iterable of bytes :param name: Name of file or collection of lines :type name: str """ u = UniversalDetector() for line in lines: line = bytearray(line) u.feed(line) # shortcut out of the loop to save reading further - particularly useful if we read a BOM. if u.done: break u.close() result = u.result if PY2: name = name.decode(sys.getfilesystemencoding(), 'ignore') if result['encoding']: return '{0}: {1} with confidence {2}'.format(name, result['encoding'], result['confidence']) else: return '{0}: no result'.format(name) def main(argv=None): """ Handles command line arguments and gets things started. :param argv: List of arguments, as if specified on the command-line. If None, ``sys.argv[1:]`` is used instead. :type argv: list of str """ # Get command line arguments parser = argparse.ArgumentParser( description="Takes one or more file paths and reports their detected \ encodings") parser.add_argument('input', help='File whose encoding we would like to determine. 
\ (default: stdin)', type=argparse.FileType('rb'), nargs='*', default=[sys.stdin if PY2 else sys.stdin.buffer]) parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) args = parser.parse_args(argv) for f in args.input: if f.isatty(): print("You are running chardetect interactively. Press " + "CTRL-D twice at the start of a blank line to signal the " + "end of your input. If you want help, run chardetect " + "--help\n", file=sys.stderr) print(description_of(f, f.name)) if __name__ == '__main__': main()
venv/lib/python3.8/site-packages/pip/_vendor/chardet/cli/chardetect.py
2,821
Return a string describing the probable encoding of a file or list of strings. :param lines: The lines to get the encoding of. :type lines: Iterable of bytes :param name: Name of file or collection of lines :type name: str Handles command line arguments and gets things started. :param argv: List of arguments, as if specified on the command-line. If None, ``sys.argv[1:]`` is used instead. :type argv: list of str Script which takes one or more file paths and reports on their detected encodings Example:: % chardetect somefile someotherfile somefile: windows-1252 with confidence 0.5 someotherfile: ascii with confidence 1.0 If no paths are provided, it takes its input from stdin. !/usr/bin/env python Copyright (c) 2020 Author: xiaoweixiang shortcut out of the loop to save reading further - particularly useful if we read a BOM. Get command line arguments
893
en
0.704874
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import time import math from fairseq import utils from fairseq import bleu class Meter(object): def reset(self): pass def update(self, val, n=1): pass @property def avg(self): pass @property def std(self): return 0.0 class AverageMeter(Meter): """Computes and stores the average and current value""" def __init__(self): self.sum = 0 self.count = 0 self.sum_square = 0 def reset(self): self.sum = 0 self.count = 0 self.sum_square = 0 def update(self, val, n=1): if isinstance(val, AverageMeter): reduced_meter: AverageMeter = utils.reduce_average_meter(self, val) self.sum = reduced_meter.sum self.count = reduced_meter.count self.sum_square = reduced_meter.sum_square else: self.sum += val * n self.count += n self.sum_square = self.sum_square + (val * val) * n @property def avg(self): if self.count == 0: return 0.0 return self.sum / self.count @property def std(self): expected_sum_square = self.sum_square / self.count expected_sum = self.avg return math.sqrt(expected_sum_square - expected_sum * expected_sum) class ConcatentateMeter(Meter): def __init__(self, lowercase=False): self.scorer = bleu.SacrebleuScorer(lowercase=lowercase) self.target_sum = [] self.hypo_sum = [] self.count = 0 def reset(self): self.target_sum = [] self.hypo_sum = [] self.count = 0 def update(self, val, n=1): self.target_sum += val[0] * n self.hypo_sum += val[1] * n self.count += n # TODO compute corpus bleu here @property def avg(self): if self.count == 0: return 0.0 # Compute the corpus level BLEU self.scorer.sys = self.hypo_sum self.scorer.ref = self.target_sum return self.scorer.score() class BleuMeter(Meter): def __init__(self): self.correct, self.total, self.sys_len, self.ref_len = utils.get_zero_bleu_stats() # 
TODO handle lowercase self.scorer = bleu.SacrebleuScorer(lowercase=False) def reset(self): self.correct, self.total, self.sys_len, self.ref_len = utils.get_zero_bleu_stats() def update(self, val, n=1): # val will be a namedtuple # We need to reduce for _ in range(n): self.correct = utils.reduce_lists(self.correct, val.correct) self.total = utils.reduce_lists(self.total, val.total) self.sys_len += val.sys_len self.ref_len += val.ref_len @property def avg(self): # We have the sufficient statistics, just compute the BLEU score return self.scorer.compute_bleu(correct=self.correct, total=self.total, sys_len=self.sys_len, ref_len=self.ref_len).score class TimeMeter(object): """Computes the average occurrence of some event per second""" def __init__(self, init=0): self.reset(init) def reset(self, init=0): self.init = init self.start = time.time() self.n = 0 def update(self, val=1): self.n += val @property def avg(self): return self.n / self.elapsed_time @property def elapsed_time(self): return self.init + (time.time() - self.start) class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self): self.reset() def start(self): self.start_time = time.time() def stop(self, n=1): if self.start_time is not None: delta = time.time() - self.start_time self.sum += delta self.n += n self.start_time = None def reset(self): self.sum = 0 self.n = 0 self.start_time = None @property def avg(self): return self.sum / self.n
fairseq/meters.py
4,348
Computes and stores the average and current value Computes the sum/avg duration of some event in seconds Computes the average occurrence of some event per second Copyright (c) 2017-present, Facebook, Inc. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. TODO compute corpus bleu here Compute the corpus level BLEU TODO handle lowercase val will be a namedtuple We need to reduce We have the sufficient statistics, just compute the BLEU score
624
en
0.8774
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = u'QB' copyright = u'2018, NRSER' author = u'NRSER' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags release = u'' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'QBdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'QB.tex', u'QB Documentation', u'NRSER', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'qb', u'QB Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'QB', u'QB Documentation', author, 'QB', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None} # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True
dev/scratch/sphinx-quickstart/conf.py
5,359
-*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path . The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. 
Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Extension configuration ------------------------------------------------- -- Options for intersphinx extension --------------------------------------- Example configuration for intersphinx: refer to the Python standard library. 
-- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing.
4,081
en
0.595275
import os import sys import logging import io from xml.sax.saxutils import escape import template #=============================================================================== #=============================================================================== class _TemplateHandler(object): def __init__(self, project): self.project = project # Get toolchain build path ex:'/opt/arm-2012.03/bin' toolchain_path = os.path.dirname(project.get_target_var("CC")) # on Mac add homebrew path to compiler path if sys.platform == "darwin": toolchain_path += ":/usr/local/bin" # Get toolchain cross prefix # ex:'arm-none-linux-gnueabi-' or '' for native toolchain_cross = project.get_target_var("CROSS") if toolchain_cross: toolchain_cross = os.path.basename(toolchain_cross) # Replacement map self.replacement = { "NAME": project.name, "PRODUCT": project.product, "VARIANT": project.variant, "TOOLCHAIN_PATH": toolchain_path, "TOOLCHAIN_CROSS": toolchain_cross, "BUILD_DIR": project.build_dir, "BUILD_CMD": "${CWD}/build.sh", "BUILD_ARGS": project.build_args, "BUILD_TARGET": project.build_target, "CLEAN_TARGET": project.clean_target, "LINKED_RESOURCES": self._gen_linked_resources, "SOURCE_ENTRIES": self._gen_source_entries, "C_INCLUDE_DIRS": self._gen_include_dirs, "C_DEFINES": self._gen_c_defines, "C_INCLUDE_FILES": self._gen_include_files, "CXX_INCLUDE_DIRS": self._gen_include_dirs, "CXX_DEFINES": self._gen_cxx_defines, "CXX_INCLUDE_FILES": self._gen_include_files, } def __call__(self, pattern): action = self.replacement.get(pattern, None) if action is None: logging.error("%s: unknown replacement pattern '%s'", self.project.name, pattern) return "" elif callable(action): return action() else: return action def _gen_linked_resources(self): output = io.StringIO() for dep in self.project.linked_resources: dep_path = self.project.linked_resources[dep] output.write("<link>\n") output.write("\t<name>%s</name>\n" % dep) output.write("\t<type>2</type>\n") 
output.write("\t<location>%s</location>\n" % dep_path) output.write("</link>\n") return output.getvalue() def _gen_source_entries(self): output = io.StringIO() if self.project.linked_resources: excluding = "|".join(self.project.linked_resources.keys()) output.write("<entry " "excluding=\"%s\" " "flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" " "kind=\"sourcePath\" " "name=\"\"/>\n" % excluding) for dep in self.project.linked_resources: output.write("<entry " "flags=\"VALUE_WORKSPACE_PATH|RESOLVED\" " "kind=\"sourcePath\" " "name=\"%s\"/>\n" % dep) return output.getvalue() def _gen_include_dirs(self): output = io.StringIO() for include in sorted(self.project.includes): output.write("<listOptionValue " "builtIn=\"false\" " "value=\"%s\"/>\n" % include) return output.getvalue() def _gen_include_files(self): output = io.StringIO() for autoconf_h_file in sorted(self.project.autoconf_h_files): output.write("<listOptionValue " "builtIn=\"false\" " "value=\"%s\"/>\n" % autoconf_h_file) return output.getvalue() def _gen_c_defines(self): return self._gen_defines(self.project.defines_c) def _gen_cxx_defines(self): defines = {} defines.update(self.project.defines_c) defines.update(self.project.defines_cxx) return self._gen_defines(defines) @staticmethod def _gen_defines(defines): output = io.StringIO() for define in sorted(defines.keys()): output.write("<listOptionValue " "builtIn=\"false\" " "value=\"%s=%s\"/>\n" % (define, escape(defines[define], {"\"": "&quot;"}))) return output.getvalue() #=============================================================================== #=============================================================================== def setup_argparse(parser): # Nothing to do pass #=============================================================================== #=============================================================================== def generate(project): _entries = [ (".project", "eclipse.project.template"), (".cproject", "eclipse.cproject.template"), ] for 
entry in _entries: outfilepath = os.path.join(project.outdirpath, entry[0]) infilepath = os.path.join(os.path.dirname(__file__), entry[1]) logging.info("%s: generating '%s'", project.name, outfilepath) template.expand(infilepath, outfilepath, _TemplateHandler(project))
scripts/genproject/eclipse.py
5,405
============================================================================================================================================================== Get toolchain build path ex:'/opt/arm-2012.03/bin' on Mac add homebrew path to compiler path Get toolchain cross prefix ex:'arm-none-linux-gnueabi-' or '' for native Replacement map============================================================================================================================================================== Nothing to do==============================================================================================================================================================
670
en
0.394952
# Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometerclient.openstack.common.apiclient import client from ceilometerclient.openstack.common.apiclient import fake_client from ceilometerclient.tests import utils import ceilometerclient.v2.trait_descriptions fixtures = { '/v2/event_types/Foo/traits': { 'GET': ( {}, [ {'name': 'trait_1', 'type': 'string'}, {'name': 'trait_2', 'type': 'integer'}, {'name': 'trait_3', 'type': 'datetime'} ] ), } } class TraitDescriptionManagerTest(utils.BaseTestCase): def setUp(self): super(TraitDescriptionManagerTest, self).setUp() self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures) self.api = client.BaseClient(self.http_client) self.mgr = (ceilometerclient.v2.trait_descriptions. TraitDescriptionManager(self.api)) def test_list(self): trait_descriptions = list(self.mgr.list('Foo')) expect = [ 'GET', '/v2/event_types/Foo/traits' ] self.http_client.assert_called(*expect) self.assertEqual(len(trait_descriptions), 3) for i, vals in enumerate([('trait_1', 'string'), ('trait_2', 'integer'), ('trait_3', 'datetime')]): name, type = vals self.assertEqual(trait_descriptions[i].name, name) self.assertEqual(trait_descriptions[i].type, type)
ceilometerclient/tests/v2/test_trait_descriptions.py
2,121
Copyright 2014 Hewlett-Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
606
en
0.856178